chore: bump version to 0.3.0

This commit is contained in:
2026-03-05 20:18:53 -08:00
parent a5554ec530
commit 2f07c3407a
20 changed files with 160 additions and 75 deletions

View File

@@ -1,6 +1,6 @@
--- ---
id: m-0 id: m-0
title: "Codebase Health Remediation" title: 'Codebase Health Remediation'
--- ---
## Description ## Description

View File

@@ -33,11 +33,15 @@ priority: medium
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
Move AniSkip MAL/title-to-MAL lookup and intro payload resolution from mpv Lua to launcher Electron flow, while keeping mpv-side intro skip UX and chapter/chapter prompt behavior in plugin. Launcher should infer/analyze file metadata, fetch AniSkip payload when launching files, and pass resolved skip window via script options; plugin should trust launcher payload and fall back only when absent. Move AniSkip MAL/title-to-MAL lookup and intro payload resolution from mpv Lua to launcher Electron flow, while keeping mpv-side intro skip UX and chapter/chapter prompt behavior in plugin. Launcher should infer/analyze file metadata, fetch AniSkip payload when launching files, and pass resolved skip window via script options; plugin should trust launcher payload and fall back only when absent.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [x] #1 Launcher infers AniSkip metadata for file targets using existing guessit/fallback logic and performs AniSkip MAL + payload resolution during mpv startup. - [x] #1 Launcher infers AniSkip metadata for file targets using existing guessit/fallback logic and performs AniSkip MAL + payload resolution during mpv startup.
- [x] #2 Launcher injects script options containing resolved MAL id and intro window fields (or explicit lookup-failure status) into mpv startup. - [x] #2 Launcher injects script options containing resolved MAL id and intro window fields (or explicit lookup-failure status) into mpv startup.
- [x] #3 Lua plugin consumes launcher-provided AniSkip intro data and skips all network lookups when payload is present. - [x] #3 Lua plugin consumes launcher-provided AniSkip intro data and skips all network lookups when payload is present.
@@ -49,15 +53,18 @@ Move AniSkip MAL/title-to-MAL lookup and intro payload resolution from mpv Lua t
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1) Add launcher-side AniSkip payload resolution helpers in launcher/aniskip-metadata.ts (MAL prefix lookup + AniSkip payload fetch + result normalization).
2) Wire launcher/mpv.ts + buildSubminerScriptOpts to pass resolved AniSkip fields/mode in --script-opts for file playback. 1. Add launcher-side AniSkip payload resolution helpers in launcher/aniskip-metadata.ts (MAL prefix lookup + AniSkip payload fetch + result normalization).
3) Update plugin/subminer/aniskip.lua plus options/state to consume injected payload: if intro_start/end present, apply immediately and skip network lookup; otherwise retain existing async behavior. 2. Wire launcher/mpv.ts + buildSubminerScriptOpts to pass resolved AniSkip fields/mode in --script-opts for file playback.
4) Ensure fallback for standalone mpv usage remains intact for no-launcher/manual refresh. 3. Update plugin/subminer/aniskip.lua plus options/state to consume injected payload: if intro_start/end present, apply immediately and skip network lookup; otherwise retain existing async behavior.
5) Add/update tests/docs/config references for new script-opt contract and edge cases. 4. Ensure fallback for standalone mpv usage remains intact for no-launcher/manual refresh.
5. Add/update tests/docs/config references for new script-opt contract and edge cases.
<!-- SECTION:PLAN:END --> <!-- SECTION:PLAN:END -->
## Final Summary ## Final Summary
<!-- SECTION:FINAL_SUMMARY:BEGIN --> <!-- SECTION:FINAL_SUMMARY:BEGIN -->
Executed end-to-end migration so launcher resolves AniSkip title/MAL/payload before mpv start and injects it via --script-opts. Plugin now parses and consumes launcher payload (JSON/url/base64), applies OP intro from payload, tracks payload metadata in state, and keeps legacy async lookup path for non-launcher/absent payload playback. Added launcher config key aniskip_payload and updated launcher/aniskip-metadata tests for resolve/payload behavior and contract validation. Executed end-to-end migration so launcher resolves AniSkip title/MAL/payload before mpv start and injects it via --script-opts. Plugin now parses and consumes launcher payload (JSON/url/base64), applies OP intro from payload, tracks payload metadata in state, and keeps legacy async lookup path for non-launcher/absent payload playback. Added launcher config key aniskip_payload and updated launcher/aniskip-metadata tests for resolve/payload behavior and contract validation.
<!-- SECTION:FINAL_SUMMARY:END --> <!-- SECTION:FINAL_SUMMARY:END -->

View File

@@ -38,6 +38,7 @@ ordinal: 13000
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
Add true keyboard-driven token lookup flow in overlay: Add true keyboard-driven token lookup flow in overlay:
- Toggle keyboard token-selection mode and navigate tokens by keyboard (`Arrow` + `HJKL`). - Toggle keyboard token-selection mode and navigate tokens by keyboard (`Arrow` + `HJKL`).
@@ -47,7 +48,9 @@ Add true keyboard-driven token lookup flow in overlay:
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [x] #1 Keyboard mode toggle exists and shows visual selection outline for active token. - [x] #1 Keyboard mode toggle exists and shows visual selection outline for active token.
- [x] #2 Navigation works via arrows and vim keys while keyboard mode is enabled. - [x] #2 Navigation works via arrows and vim keys while keyboard mode is enabled.
- [x] #3 Lookup window toggles from selected token with `Ctrl/Cmd+Y`; close path restores overlay keyboard focus. - [x] #3 Lookup window toggles from selected token with `Ctrl/Cmd+Y`; close path restores overlay keyboard focus.
@@ -59,5 +62,7 @@ Add true keyboard-driven token lookup flow in overlay:
## Final Summary ## Final Summary
<!-- SECTION:FINAL_SUMMARY:BEGIN --> <!-- SECTION:FINAL_SUMMARY:BEGIN -->
Implemented keyboard-driven Yomitan workflow end-to-end in renderer + bundled Yomitan runtime bridge. Added overlay-level keyboard mode state, token selection sync, lookup toggle routing, popup command forwarding, and focus recovery after popup close. Follow-up fixes kept lookup open while moving between tokens, made popup-local `J/K` and `ArrowUp/ArrowDown` scroll work from overlay-owned focus with key repeat, skipped keyboard/token annotation flow for parser groups that have no dictionary-backed headword, and preserved paused playback when token navigation jumps across subtitle lines. Updated user docs/README to document the final shortcut behavior. Implemented keyboard-driven Yomitan workflow end-to-end in renderer + bundled Yomitan runtime bridge. Added overlay-level keyboard mode state, token selection sync, lookup toggle routing, popup command forwarding, and focus recovery after popup close. Follow-up fixes kept lookup open while moving between tokens, made popup-local `J/K` and `ArrowUp/ArrowDown` scroll work from overlay-owned focus with key repeat, skipped keyboard/token annotation flow for parser groups that have no dictionary-backed headword, and preserved paused playback when token navigation jumps across subtitle lines. Updated user docs/README to document the final shortcut behavior.
<!-- SECTION:FINAL_SUMMARY:END --> <!-- SECTION:FINAL_SUMMARY:END -->

View File

@@ -30,11 +30,15 @@ priority: high
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
Track the remediation work from the March 6, 2026 code review. The review found that the default test gate only exercises 53 of 241 test files, the dedicated subtitle test lane is a no-op, SQLite-backed immersion tracking tests are conditionally skipped in the standard Bun run, src/main.ts still contains a large dead-symbol backlog, several registry/pipeline modules appear unreferenced from live execution paths, and src/anki-integration.ts remains an oversized orchestration file. This parent task should coordinate a safe sequence: improve verification first, then remove dead code and continue decomposition with good test coverage in place. Track the remediation work from the March 6, 2026 code review. The review found that the default test gate only exercises 53 of 241 test files, the dedicated subtitle test lane is a no-op, SQLite-backed immersion tracking tests are conditionally skipped in the standard Bun run, src/main.ts still contains a large dead-symbol backlog, several registry/pipeline modules appear unreferenced from live execution paths, and src/anki-integration.ts remains an oversized orchestration file. This parent task should coordinate a safe sequence: improve verification first, then remove dead code and continue decomposition with good test coverage in place.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 Child tasks are created for each remediation workstream with explicit dependencies and enough context for an isolated agent to execute them. - [ ] #1 Child tasks are created for each remediation workstream with explicit dependencies and enough context for an isolated agent to execute them.
- [ ] #2 The parent task records the recommended sequencing and parallelization strategy so replacement agents can resume without conversation history. - [ ] #2 The parent task records the recommended sequencing and parallelization strategy so replacement agents can resume without conversation history.
- [ ] #3 Completion of the parent task leaves the repository with a materially more trustworthy test gate, less dead architecture, and clearer ownership boundaries for the main runtime and Anki integration surfaces. - [ ] #3 Completion of the parent task leaves the repository with a materially more trustworthy test gate, less dead architecture, and clearer ownership boundaries for the main runtime and Anki integration surfaces.
@@ -43,7 +47,9 @@ Track the remediation work from the March 6, 2026 code review. The review found
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
Recommended sequencing: Recommended sequencing:
1. Run TASK-87.1, TASK-87.2, TASK-87.3, and TASK-87.7 first. These are the safety-net and tooling tasks and can largely proceed in parallel. 1. Run TASK-87.1, TASK-87.2, TASK-87.3, and TASK-87.7 first. These are the safety-net and tooling tasks and can largely proceed in parallel.
2. Start TASK-87.4 once TASK-87.1 lands so src/main.ts cleanup happens under a more trustworthy test matrix. 2. Start TASK-87.4 once TASK-87.1 lands so src/main.ts cleanup happens under a more trustworthy test matrix.
3. Start TASK-87.5 after TASK-87.1 and TASK-87.2 so dead subsync/pipeline cleanup happens with stronger subtitle and runtime verification. 3. Start TASK-87.5 after TASK-87.1 and TASK-87.2 so dead subsync/pipeline cleanup happens with stronger subtitle and runtime verification.
@@ -51,10 +57,12 @@ Recommended sequencing:
5. Keep PRs focused: do not combine verification work with architectural cleanup unless a narrow dependency requires it. 5. Keep PRs focused: do not combine verification work with architectural cleanup unless a narrow dependency requires it.
Parallelization guidance: Parallelization guidance:
- Wave 1 parallel: TASK-87.1, TASK-87.2, TASK-87.3, TASK-87.7 - Wave 1 parallel: TASK-87.1, TASK-87.2, TASK-87.3, TASK-87.7
- Wave 2 parallel: TASK-87.4, TASK-87.5, TASK-87.6 - Wave 2 parallel: TASK-87.4, TASK-87.5, TASK-87.6
Shared review context to restate in child tasks: Shared review context to restate in child tasks:
- Standard test scripts currently reference only 53 unique test files out of 241 discovered test and type-test files under src/ and launcher/. - Standard test scripts currently reference only 53 unique test files out of 241 discovered test and type-test files under src/ and launcher/.
- test:subtitle is currently a placeholder echo even though subtitle sync is a user-facing feature. - test:subtitle is currently a placeholder echo even though subtitle sync is a user-facing feature.
- SQLite-backed immersion tracker tests are conditionally skipped in the standard Bun run. - SQLite-backed immersion tracker tests are conditionally skipped in the standard Bun run.

View File

@@ -27,11 +27,15 @@ priority: high
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
The current package scripts hand-enumerate a small subset of test files, which leaves the standard green signal misleading. A local audit found 241 test/type-test files under src/ and launcher/, but only 53 unique files referenced by the standard package.json test scripts. This task should redesign the runnable test matrix so maintained tests are either executed by the standard commands or intentionally excluded through a documented rule, instead of silently drifting out of coverage. The current package scripts hand-enumerate a small subset of test files, which leaves the standard green signal misleading. A local audit found 241 test/type-test files under src/ and launcher/, but only 53 unique files referenced by the standard package.json test scripts. This task should redesign the runnable test matrix so maintained tests are either executed by the standard commands or intentionally excluded through a documented rule, instead of silently drifting out of coverage.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 The repository has a documented and reproducible test matrix for standard development commands, including which suites belong in the default lane versus slower or environment-specific lanes. - [ ] #1 The repository has a documented and reproducible test matrix for standard development commands, including which suites belong in the default lane versus slower or environment-specific lanes.
- [ ] #2 The standard test entrypoints stop relying on a brittle hand-maintained allowlist for the currently covered unit and integration suites, or an explicit documented mechanism exists that prevents silent omission of new tests. - [ ] #2 The standard test entrypoints stop relying on a brittle hand-maintained allowlist for the currently covered unit and integration suites, or an explicit documented mechanism exists that prevents silent omission of new tests.
- [ ] #3 Representative tests that were previously outside the standard lane from src/main/runtime, src/anki-integration, and entry/runtime surfaces are executed by an automated command and included in the documented matrix. - [ ] #3 Representative tests that were previously outside the standard lane from src/main/runtime, src/anki-integration, and entry/runtime surfaces are executed by an automated command and included in the documented matrix.
@@ -41,6 +45,7 @@ The current package scripts hand-enumerate a small subset of test files, which l
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Inventory the current test surface under src/ and launcher/ and compare it to package.json scripts to classify fast, full, slow, and environment-specific suites. 1. Inventory the current test surface under src/ and launcher/ and compare it to package.json scripts to classify fast, full, slow, and environment-specific suites.
2. Replace or reduce the brittle hand-maintained allowlist so new maintained tests do not silently miss the standard matrix. 2. Replace or reduce the brittle hand-maintained allowlist so new maintained tests do not silently miss the standard matrix.
3. Update contributor docs with the intended fast/full/environment-specific commands. 3. Update contributor docs with the intended fast/full/environment-specific commands.

View File

@@ -27,11 +27,15 @@ priority: high
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
SubMiner advertises subtitle syncing with alass and ffsubsync, but the dedicated test:subtitle command currently does not run any tests. There is already lower-level coverage in src/core/services/subsync.test.ts, but the test matrix and contributor-facing commands do not reflect that reality. This task should replace the no-op lane with real verification, align scripts with the existing subsync test surface, and make the user-facing docs honest about how subtitle sync is verified. SubMiner advertises subtitle syncing with alass and ffsubsync, but the dedicated test:subtitle command currently does not run any tests. There is already lower-level coverage in src/core/services/subsync.test.ts, but the test matrix and contributor-facing commands do not reflect that reality. This task should replace the no-op lane with real verification, align scripts with the existing subsync test surface, and make the user-facing docs honest about how subtitle sync is verified.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 The test:subtitle entrypoint runs real automated verification instead of echoing a placeholder message. - [ ] #1 The test:subtitle entrypoint runs real automated verification instead of echoing a placeholder message.
- [ ] #2 The subtitle verification lane covers both alass and ffsubsync behavior, including at least one non-happy-path scenario relevant to current functionality. - [ ] #2 The subtitle verification lane covers both alass and ffsubsync behavior, including at least one non-happy-path scenario relevant to current functionality.
- [ ] #3 Contributor-facing documentation points to the real subtitle verification command and no longer implies a dedicated test lane exists when it does not. - [ ] #3 Contributor-facing documentation points to the real subtitle verification command and no longer implies a dedicated test lane exists when it does not.
@@ -41,6 +45,7 @@ SubMiner advertises subtitle syncing with alass and ffsubsync, but the dedicated
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Audit the existing subtitle-sync test surface, especially src/core/services/subsync.test.ts, and decide whether test:subtitle should reuse or regroup that coverage. 1. Audit the existing subtitle-sync test surface, especially src/core/services/subsync.test.ts, and decide whether test:subtitle should reuse or regroup that coverage.
2. Replace the placeholder script with a real automated command and keep the matrix legible alongside TASK-87.1 work. 2. Replace the placeholder script with a real automated command and keep the matrix legible alongside TASK-87.1 work.
3. Update README or related docs so the advertised subtitle verification path matches reality. 3. Update README or related docs so the advertised subtitle verification path matches reality.

View File

@@ -26,11 +26,15 @@ priority: medium
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
The immersion tracker is persistence-heavy, but its SQLite-backed tests are conditionally skipped in the standard Bun run when node:sqlite support is unavailable. That creates a blind spot around session finalization, telemetry persistence, and retention behavior. This task should establish a reliable automated verification path for the database-backed cases and make the prerequisite/runtime behavior explicit to contributors and CI. The immersion tracker is persistence-heavy, but its SQLite-backed tests are conditionally skipped in the standard Bun run when node:sqlite support is unavailable. That creates a blind spot around session finalization, telemetry persistence, and retention behavior. This task should establish a reliable automated verification path for the database-backed cases and make the prerequisite/runtime behavior explicit to contributors and CI.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 Database-backed immersion tracking tests run in at least one documented automated command that is practical for contributors or CI to execute. - [ ] #1 Database-backed immersion tracking tests run in at least one documented automated command that is practical for contributors or CI to execute.
- [ ] #2 If the current runtime cannot execute the SQLite-backed tests, the repository exposes that limitation clearly instead of silently reporting a misleading green result. - [ ] #2 If the current runtime cannot execute the SQLite-backed tests, the repository exposes that limitation clearly instead of silently reporting a misleading green result.
- [ ] #3 Contributor-facing documentation explains how to run the immersion tracker verification lane and any environment prerequisites it depends on. - [ ] #3 Contributor-facing documentation explains how to run the immersion tracker verification lane and any environment prerequisites it depends on.
@@ -40,6 +44,7 @@ The immersion tracker is persistence-heavy, but its SQLite-backed tests are cond
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Confirm which SQLite-backed immersion tests are currently skipped and why in the standard Bun environment. 1. Confirm which SQLite-backed immersion tests are currently skipped and why in the standard Bun environment.
2. Establish a reproducible command or lane for the DB-backed cases, or make the unsupported-runtime limitation explicit and actionable. 2. Establish a reproducible command or lane for the DB-backed cases, or make the unsupported-runtime limitation explicit and actionable.
3. Document prerequisites and expected behavior for contributors and CI. 3. Document prerequisites and expected behavior for contributors and CI.

View File

@@ -27,11 +27,15 @@ priority: high
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
A noUnusedLocals/noUnusedParameters compile pass reports a large concentration of dead imports and dead locals in src/main.ts. The file is also far beyond the repo's preferred size guideline, which makes the runtime composition root difficult to review and easy to break. This task should remove confirmed dead symbols, continue extracting coherent slices where that improves readability, and leave the entrypoint materially easier to understand without changing behavior. A noUnusedLocals/noUnusedParameters compile pass reports a large concentration of dead imports and dead locals in src/main.ts. The file is also far beyond the repo's preferred size guideline, which makes the runtime composition root difficult to review and easy to break. This task should remove confirmed dead symbols, continue extracting coherent slices where that improves readability, and leave the entrypoint materially easier to understand without changing behavior.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 src/main.ts no longer emits dead-symbol diagnostics under a noUnusedLocals/noUnusedParameters compile pass for the areas touched by this cleanup. - [ ] #1 src/main.ts no longer emits dead-symbol diagnostics under a noUnusedLocals/noUnusedParameters compile pass for the areas touched by this cleanup.
- [ ] #2 Unused imports, destructured values, and stale locals identified in the current composition root are removed or relocated without behavior changes. - [ ] #2 Unused imports, destructured values, and stale locals identified in the current composition root are removed or relocated without behavior changes.
- [ ] #3 The resulting composition root has clearer ownership boundaries for at least one runtime slice that is currently buried in the monolith. - [ ] #3 The resulting composition root has clearer ownership boundaries for at least one runtime slice that is currently buried in the monolith.
@@ -41,6 +45,7 @@ A noUnusedLocals/noUnusedParameters compile pass reports a large concentration o
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Re-run the noUnusedLocals/noUnusedParameters compile pass and capture the src/main.ts diagnostics cluster before editing. 1. Re-run the noUnusedLocals/noUnusedParameters compile pass and capture the src/main.ts diagnostics cluster before editing.
2. Remove dead imports, destructured values, and stale locals in small reviewable slices; extract a coherent helper/module only where that materially reduces coupling. 2. Remove dead imports, destructured values, and stale locals in small reviewable slices; extract a coherent helper/module only where that materially reduces coupling.
3. Keep changes behavior-preserving and avoid mixing unrelated cleanup outside src/main.ts unless required to compile. 3. Keep changes behavior-preserving and avoid mixing unrelated cleanup outside src/main.ts unless required to compile.

View File

@@ -31,11 +31,15 @@ priority: high
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
The review found several modules that appear self-contained but unused from the application's live execution paths: src/translators/index.ts, src/subsync/engines.ts, src/subtitle/pipeline.ts, src/tokenizers/index.ts, and src/token-mergers/index.ts. At the same time, the real runtime behavior is implemented elsewhere. This task should verify those modules are truly unused, remove or consolidate them, and clean up any stale exports, docs, or tests so contributors are not misled by duplicate architecture. The review found several modules that appear self-contained but unused from the application's live execution paths: src/translators/index.ts, src/subsync/engines.ts, src/subtitle/pipeline.ts, src/tokenizers/index.ts, and src/token-mergers/index.ts. At the same time, the real runtime behavior is implemented elsewhere. This task should verify those modules are truly unused, remove or consolidate them, and clean up any stale exports, docs, or tests so contributors are not misled by duplicate architecture.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 Each candidate module identified in the review is either removed as dead code or justified and reconnected to a real supported execution path. - [ ] #1 Each candidate module identified in the review is either removed as dead code or justified and reconnected to a real supported execution path.
- [ ] #2 Any stale exports, imports, or tests associated with the removed or consolidated modules are cleaned up so the codebase has a single obvious path for the affected behavior. - [ ] #2 Any stale exports, imports, or tests associated with the removed or consolidated modules are cleaned up so the codebase has a single obvious path for the affected behavior.
- [ ] #3 The cleanup does not regress live tokenization or subtitle sync behavior and the relevant verification commands remain green. - [ ] #3 The cleanup does not regress live tokenization or subtitle sync behavior and the relevant verification commands remain green.
@@ -45,6 +49,7 @@ The review found several modules that appear self-contained but unused from the
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Re-verify each candidate module is off the live path by tracing imports from current runtime entrypoints before deleting anything. 1. Re-verify each candidate module is off the live path by tracing imports from current runtime entrypoints before deleting anything.
2. Remove or consolidate truly dead modules and clean associated exports/imports/tests so only the supported path remains obvious. 2. Remove or consolidate truly dead modules and clean associated exports/imports/tests so only the supported path remains obvious.
3. Pay special attention to subtitle sync and tokenization surfaces, since duplicate architecture exists near active code. 3. Pay special attention to subtitle sync and tokenization surfaces, since duplicate architecture exists near active code.

View File

@@ -31,11 +31,15 @@ priority: medium
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
src/anki-integration.ts remains an oversized orchestration file even after earlier extractions. It still mixes config normalization, polling setup, media generation, duplicate resolution, field grouping workflows, and user feedback coordination in one class. This task should continue the decomposition so the remaining orchestration surface is smaller and easier to reason about, while preserving existing Anki, proxy, field grouping, and note update behavior. src/anki-integration.ts remains an oversized orchestration file even after earlier extractions. It still mixes config normalization, polling setup, media generation, duplicate resolution, field grouping workflows, and user feedback coordination in one class. This task should continue the decomposition so the remaining orchestration surface is smaller and easier to reason about, while preserving existing Anki, proxy, field grouping, and note update behavior.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 The responsibilities currently concentrated in src/anki-integration.ts are split into clearer modules or services with narrow ownership boundaries. - [ ] #1 The responsibilities currently concentrated in src/anki-integration.ts are split into clearer modules or services with narrow ownership boundaries.
- [ ] #2 The resulting orchestration surface is materially smaller and easier to review, with at least one mixed-responsibility cluster extracted behind a well-named interface. - [ ] #2 The resulting orchestration surface is materially smaller and easier to review, with at least one mixed-responsibility cluster extracted behind a well-named interface.
- [ ] #3 Existing Anki integration behavior remains covered by automated verification, including note update, field grouping, and proxy-related flows that the refactor touches. - [ ] #3 Existing Anki integration behavior remains covered by automated verification, including note update, field grouping, and proxy-related flows that the refactor touches.
@@ -45,6 +49,7 @@ src/anki-integration.ts remains an oversized orchestration file even after earli
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Map the remaining responsibility clusters inside src/anki-integration.ts and choose one or more extraction seams that reduce mixed concerns without changing behavior. 1. Map the remaining responsibility clusters inside src/anki-integration.ts and choose one or more extraction seams that reduce mixed concerns without changing behavior.
2. Move logic behind narrow interfaces/modules rather than creating another giant helper; keep orchestration readable. 2. Move logic behind narrow interfaces/modules rather than creating another giant helper; keep orchestration readable.
3. Preserve coverage for field grouping, note update, proxy, and card creation flows touched by the refactor. 3. Preserve coverage for field grouping, note update, proxy, and card creation flows touched by the refactor.

View File

@@ -25,11 +25,15 @@ priority: low
## Description ## Description
<!-- SECTION:DESCRIPTION:BEGIN --> <!-- SECTION:DESCRIPTION:BEGIN -->
The review found a few low-risk but recurring hygiene issues: docs:watch depends on bunx concurrently even though concurrently is not declared in package metadata, and small stale API surface remains after recent refactors, such as unused parameters in field-grouping workflow code. This task should make the developer workflow reproducible and clean up low-risk stale symbols that do not warrant a dedicated architecture task. The review found a few low-risk but recurring hygiene issues: docs:watch depends on bunx concurrently even though concurrently is not declared in package metadata, and small stale API surface remains after recent refactors, such as unused parameters in field-grouping workflow code. This task should make the developer workflow reproducible and clean up low-risk stale symbols that do not warrant a dedicated architecture task.
<!-- SECTION:DESCRIPTION:END --> <!-- SECTION:DESCRIPTION:END -->
## Acceptance Criteria ## Acceptance Criteria
<!-- AC:BEGIN --> <!-- AC:BEGIN -->
- [ ] #1 The docs:watch workflow runs through declared project tooling or is rewritten to avoid undeclared dependencies. - [ ] #1 The docs:watch workflow runs through declared project tooling or is rewritten to avoid undeclared dependencies.
- [ ] #2 Small stale symbols or parameters identified during the review outside the main composition-root cleanup are removed without behavior changes. - [ ] #2 Small stale symbols or parameters identified during the review outside the main composition-root cleanup are removed without behavior changes.
- [ ] #3 Any contributor-facing command changes are reflected in repository documentation. - [ ] #3 Any contributor-facing command changes are reflected in repository documentation.
@@ -39,6 +43,7 @@ The review found a few low-risk but recurring hygiene issues: docs:watch depends
## Implementation Plan ## Implementation Plan
<!-- SECTION:PLAN:BEGIN --> <!-- SECTION:PLAN:BEGIN -->
1. Fix the docs:watch workflow so it relies on declared project tooling or an equivalent checked-in command path. 1. Fix the docs:watch workflow so it relies on declared project tooling or an equivalent checked-in command path.
2. Clean up low-risk stale symbols surfaced by the review outside the main.ts architecture task, such as unused parameters left behind by refactors. 2. Clean up low-risk stale symbols surfaced by the review outside the main.ts architecture task, such as unused parameters left behind by refactors.
3. Keep the task scoped: avoid pulling in main composition-root cleanup or larger Anki/runtime refactors. 3. Keep the task scoped: avoid pulling in main composition-root cleanup or larger Anki/runtime refactors.

View File

@@ -779,7 +779,7 @@ Sync the active subtitle track using `alass` (preferred) or `ffsubsync`:
``` ```
| Option | Values | Description | | Option | Values | Description |
| ---------------- | -------------------- | ----------------------------------------------------------------------------------------------------------- | | ---------------- | -------------------- | ------------------------------------------------------------------------------------------------------------------------- |
| `defaultMode` | `"auto"`, `"manual"` | `auto`: try `alass` against secondary subtitle, then fallback to `ffsubsync`; `manual`: open overlay picker | | `defaultMode` | `"auto"`, `"manual"` | `auto`: try `alass` against secondary subtitle, then fallback to `ffsubsync`; `manual`: open overlay picker |
| `alass_path` | string path | Path to `alass` executable. Empty or `null` falls back to `/usr/bin/alass`. | | `alass_path` | string path | Path to `alass` executable. Empty or `null` falls back to `/usr/bin/alass`. |
| `ffsubsync_path` | string path | Path to `ffsubsync` executable. Empty or `null` falls back to `/usr/bin/ffsubsync`. | | `ffsubsync_path` | string path | Path to `ffsubsync` executable. Empty or `null` falls back to `/usr/bin/ffsubsync`. |

View File

@@ -121,7 +121,7 @@ aniskip_button_duration=3
### Option Reference ### Option Reference
| Option | Default | Values | Description | | Option | Default | Values | Description |
| ------------------------------ | ----------------------------- | ------------------------------------------ | -------------------------------------------------------------------------------- | | ------------------------------ | ----------------------------- | ------------------------------------------ | ------------------------------------------------------------------------------------------ |
| `binary_path` | `""` (auto-detect) | file path | Path to SubMiner binary | | `binary_path` | `""` (auto-detect) | file path | Path to SubMiner binary |
| `socket_path` | `/tmp/subminer-socket` | file path | MPV IPC socket path | | `socket_path` | `/tmp/subminer-socket` | file path | MPV IPC socket path |
| `texthooker_enabled` | `yes` | `yes` / `no` | Enable texthooker server | | `texthooker_enabled` | `yes` | `yes` / `no` | Enable texthooker server |

View File

@@ -63,7 +63,7 @@ Mouse-hover playback behavior is configured separately from shortcuts: `subtitle
When a Yomitan popup is open, SubMiner also provides popup control shortcuts: When a Yomitan popup is open, SubMiner also provides popup control shortcuts:
| Shortcut | Action | | Shortcut | Action |
| ------------- | -------------------------------------- | | ----------- | ----------------------------------------------- |
| `J` | Scroll definitions down | | `J` | Scroll definitions down |
| `K` | Scroll definitions up | | `K` | Scroll definitions up |
| `ArrowDown` | Scroll definitions down | | `ArrowDown` | Scroll definitions down |
@@ -78,7 +78,7 @@ When a Yomitan popup is open, SubMiner also provides popup control shortcuts:
These shortcuts are fixed (not configurable) and require overlay focus. These shortcuts are fixed (not configurable) and require overlay focus.
| Shortcut | Action | | Shortcut | Action |
| ------------------ | --------------------------------------------------------------------- | | ------------------------------ | -------------------------------------------------------------------------------------------- |
| `Ctrl/Cmd+Shift+Y` | Toggle keyboard-driven token selection mode on/off | | `Ctrl/Cmd+Shift+Y` | Toggle keyboard-driven token selection mode on/off |
| `Ctrl/Cmd+Y` | Toggle lookup popup for selected token (open when closed, close when open) | | `Ctrl/Cmd+Y` | Toggle lookup popup for selected token (open when closed, close when open) |
| `ArrowLeft/Right`, `H`, or `L` | Move selected token (previous/next); if lookup is open, refresh definition for the new token | | `ArrowLeft/Right`, `H`, or `L` | Move selected token (previous/next); if lookup is open, refresh definition for the new token |

View File

@@ -196,7 +196,11 @@ function seasonSignalScore(requestedSeason: number | null, candidateTitle: strin
} as const; } as const;
const aliases = romanAliases[season] ?? []; const aliases = romanAliases[season] ?? [];
return aliases.some((alias) => normalized.includes(alias)) ? 40 : hasAnySequelMarker(candidateTitle) ? -20 : 5; return aliases.some((alias) => normalized.includes(alias))
? 40
: hasAnySequelMarker(candidateTitle)
? -20
: 5;
} }
function toMalSearchItems(payload: unknown): MalSearchResult[] { function toMalSearchItems(payload: unknown): MalSearchResult[] {
@@ -230,7 +234,11 @@ function parseAniSkipPayload(payload: unknown): { start: number; end: number } |
for (const rawResult of results) { for (const rawResult of results) {
const result = rawResult as AniSkipSkipItemPayload; const result = rawResult as AniSkipSkipItemPayload;
if (result.skip_type !== 'op' || typeof result.interval !== 'object' || result.interval === null) { if (
result.skip_type !== 'op' ||
typeof result.interval !== 'object' ||
result.interval === null
) {
continue; continue;
} }
const interval = result.interval as AniSkipIntervalPayload; const interval = result.interval as AniSkipIntervalPayload;
@@ -287,7 +295,9 @@ async function fetchAniSkipPayload(
malId: number, malId: number,
episode: number, episode: number,
): Promise<{ start: number; end: number } | null> { ): Promise<{ start: number; end: number } | null> {
const payload = await fetchJson<unknown>(`${ANISKIP_PAYLOAD_API}${malId}/${episode}?types=op&types=ed`); const payload = await fetchJson<unknown>(
`${ANISKIP_PAYLOAD_API}${malId}/${episode}?types=op&types=ed`,
);
const parsed = payload as AniSkipPayloadResponse; const parsed = payload as AniSkipPayloadResponse;
if (!parsed || parsed.found !== true) return null; if (!parsed || parsed.found !== true) return null;
return parseAniSkipPayload(parsed); return parseAniSkipPayload(parsed);
@@ -565,7 +575,9 @@ export function buildSubminerScriptOpts(
parts.push(`subminer-aniskip_intro_end=${aniSkipMetadata.introEnd}`); parts.push(`subminer-aniskip_intro_end=${aniSkipMetadata.introEnd}`);
} }
if (aniSkipMetadata?.lookupStatus) { if (aniSkipMetadata?.lookupStatus) {
parts.push(`subminer-aniskip_lookup_status=${sanitizeScriptOptValue(aniSkipMetadata.lookupStatus)}`); parts.push(
`subminer-aniskip_lookup_status=${sanitizeScriptOptValue(aniSkipMetadata.lookupStatus)}`,
);
} }
const aniskipPayload = aniSkipMetadata ? buildLauncherAniSkipPayload(aniSkipMetadata) : null; const aniskipPayload = aniSkipMetadata ? buildLauncherAniSkipPayload(aniSkipMetadata) : null;
if (aniskipPayload) { if (aniskipPayload) {

View File

@@ -1,6 +1,6 @@
{ {
"name": "subminer", "name": "subminer",
"version": "0.2.3", "version": "0.3.0",
"description": "All-in-one sentence mining overlay with AnkiConnect and dictionary integration", "description": "All-in-one sentence mining overlay with AnkiConnect and dictionary integration",
"packageManager": "bun@1.3.5", "packageManager": "bun@1.3.5",
"main": "dist/main-entry.js", "main": "dist/main-entry.js",

View File

@@ -174,7 +174,8 @@ export function applySubtitleDomainConfig(context: ResolveContext): void {
} }
const autoPauseVideoOnYomitanPopup = asBoolean( const autoPauseVideoOnYomitanPopup = asBoolean(
(src.subtitleStyle as { autoPauseVideoOnYomitanPopup?: unknown }).autoPauseVideoOnYomitanPopup, (src.subtitleStyle as { autoPauseVideoOnYomitanPopup?: unknown })
.autoPauseVideoOnYomitanPopup,
); );
if (autoPauseVideoOnYomitanPopup !== undefined) { if (autoPauseVideoOnYomitanPopup !== undefined) {
resolved.subtitleStyle.autoPauseVideoOnYomitanPopup = autoPauseVideoOnYomitanPopup; resolved.subtitleStyle.autoPauseVideoOnYomitanPopup = autoPauseVideoOnYomitanPopup;

View File

@@ -328,7 +328,9 @@ test('keyboard mode: up/down/j/k do not open or close lookup when popup is close
await wait(0); await wait(0);
const openEvents = testGlobals.commandEvents.filter((event) => event.type === 'scanSelectedText'); const openEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'scanSelectedText',
);
assert.equal(openEvents.length, 0); assert.equal(openEvents.length, 0);
const closeEvents = testGlobals.commandEvents.filter( const closeEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'setVisible' && event.visible === false, (event) => event.type === 'setVisible' && event.visible === false,
@@ -355,16 +357,28 @@ test('keyboard mode: up/down/j/k forward keydown to yomitan popup when open', as
testGlobals.dispatchKeydown({ key: 'j', code: 'KeyJ' }); testGlobals.dispatchKeydown({ key: 'j', code: 'KeyJ' });
testGlobals.dispatchKeydown({ key: 'k', code: 'KeyK' }); testGlobals.dispatchKeydown({ key: 'k', code: 'KeyK' });
const forwarded = testGlobals.commandEvents.filter( const forwarded = testGlobals.commandEvents.filter((event) => event.type === 'forwardKeyDown');
(event) => event.type === 'forwardKeyDown',
);
assert.equal(forwarded.length, 4); assert.equal(forwarded.length, 4);
assert.equal(forwarded.some((event) => event.code === 'ArrowUp'), true); assert.equal(
assert.equal(forwarded.some((event) => event.code === 'ArrowDown'), true); forwarded.some((event) => event.code === 'ArrowUp'),
assert.equal(forwarded.some((event) => event.code === 'KeyJ'), true); true,
assert.equal(forwarded.some((event) => event.code === 'KeyK'), true); );
assert.equal(
forwarded.some((event) => event.code === 'ArrowDown'),
true,
);
assert.equal(
forwarded.some((event) => event.code === 'KeyJ'),
true,
);
assert.equal(
forwarded.some((event) => event.code === 'KeyK'),
true,
);
const openEvents = testGlobals.commandEvents.filter((event) => event.type === 'scanSelectedText'); const openEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'scanSelectedText',
);
assert.equal(openEvents.length, 0); assert.equal(openEvents.length, 0);
const closeEvents = testGlobals.commandEvents.filter( const closeEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'setVisible' && event.visible === false, (event) => event.type === 'setVisible' && event.visible === false,
@@ -389,9 +403,7 @@ test('keyboard mode: repeated popup navigation keys are forwarded while popup is
testGlobals.dispatchKeydown({ key: 'j', code: 'KeyJ', repeat: true }); testGlobals.dispatchKeydown({ key: 'j', code: 'KeyJ', repeat: true });
testGlobals.dispatchKeydown({ key: 'ArrowDown', code: 'ArrowDown', repeat: true }); testGlobals.dispatchKeydown({ key: 'ArrowDown', code: 'ArrowDown', repeat: true });
const forwarded = testGlobals.commandEvents.filter( const forwarded = testGlobals.commandEvents.filter((event) => event.type === 'forwardKeyDown');
(event) => event.type === 'forwardKeyDown',
);
assert.equal(forwarded.length, 2); assert.equal(forwarded.length, 2);
assert.deepEqual( assert.deepEqual(
forwarded.map((event) => ({ code: event.code, repeat: event.repeat })), forwarded.map((event) => ({ code: event.code, repeat: event.repeat })),
@@ -445,7 +457,9 @@ test('keyboard mode: h moves left while popup is open and keeps lookup active',
await wait(80); await wait(80);
assert.equal(ctx.state.keyboardSelectedWordIndex, 1); assert.equal(ctx.state.keyboardSelectedWordIndex, 1);
const openEvents = testGlobals.commandEvents.filter((event) => event.type === 'scanSelectedText'); const openEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'scanSelectedText',
);
assert.equal(openEvents.length > 0, true); assert.equal(openEvents.length > 0, true);
const closeEvents = testGlobals.commandEvents.filter( const closeEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'setVisible' && event.visible === false, (event) => event.type === 'setVisible' && event.visible === false,
@@ -546,7 +560,9 @@ test('keyboard mode: popup-open edge jump refreshes lookup on the new subtitle s
await wait(80); await wait(80);
assert.equal(ctx.state.keyboardSelectedWordIndex, 0); assert.equal(ctx.state.keyboardSelectedWordIndex, 0);
const openEvents = testGlobals.commandEvents.filter((event) => event.type === 'scanSelectedText'); const openEvents = testGlobals.commandEvents.filter(
(event) => event.type === 'scanSelectedText',
);
assert.equal(openEvents.length > 0, true); assert.equal(openEvents.length > 0, true);
} finally { } finally {
ctx.state.keyboardDrivenModeEnabled = false; ctx.state.keyboardDrivenModeEnabled = false;

View File

@@ -130,7 +130,9 @@ export function createKeyboardHandlers(
} }
function getSubtitleWordNodes(): HTMLElement[] { function getSubtitleWordNodes(): HTMLElement[] {
return Array.from(ctx.dom.subtitleRoot.querySelectorAll<HTMLElement>('.word[data-token-index]')); return Array.from(
ctx.dom.subtitleRoot.querySelectorAll<HTMLElement>('.word[data-token-index]'),
);
} }
function syncKeyboardTokenSelection(): void { function syncKeyboardTokenSelection(): void {
@@ -188,7 +190,9 @@ export function createKeyboardHandlers(
setKeyboardDrivenModeEnabled(!ctx.state.keyboardDrivenModeEnabled); setKeyboardDrivenModeEnabled(!ctx.state.keyboardDrivenModeEnabled);
} }
function moveKeyboardSelection(delta: -1 | 1): 'moved' | 'start-boundary' | 'end-boundary' | 'no-words' { function moveKeyboardSelection(
delta: -1 | 1,
): 'moved' | 'start-boundary' | 'end-boundary' | 'no-words' {
const wordNodes = getSubtitleWordNodes(); const wordNodes = getSubtitleWordNodes();
if (wordNodes.length === 0) { if (wordNodes.length === 0) {
ctx.state.keyboardSelectedWordIndex = null; ctx.state.keyboardSelectedWordIndex = null;

View File

@@ -2,10 +2,7 @@ import assert from 'node:assert/strict';
import test from 'node:test'; import test from 'node:test';
import { createMouseHandlers } from './mouse.js'; import { createMouseHandlers } from './mouse.js';
import { import { YOMITAN_POPUP_HIDDEN_EVENT, YOMITAN_POPUP_SHOWN_EVENT } from '../yomitan-popup.js';
YOMITAN_POPUP_HIDDEN_EVENT,
YOMITAN_POPUP_SHOWN_EVENT,
} from '../yomitan-popup.js';
function createClassList() { function createClassList() {
const classes = new Set<string>(); const classes = new Set<string>();