mirror of
https://github.com/ksyasuda/SubMiner.git
synced 2026-05-04 12:41:30 -07:00
Compare commits
36 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
4d5bf3de41
|
|||
|
8342fa0c0e
|
|||
|
745996c72d
|
|||
|
95277f30bd
|
|||
|
08dc8871d3
|
|||
|
402b58385d
|
|||
|
b8dc5db14a
|
|||
|
47161cd8a5
|
|||
|
9bcea2fc5f
|
|||
|
00a94d6bd1
|
|||
|
69d5cc7557
|
|||
|
040741cf57
|
|||
|
b245ca642d
|
|||
|
cd057d1a4f
|
|||
|
6b9cb13b07
|
|||
|
a9c3a5e679
|
|||
|
b926f97578
|
|||
|
a9625f8777
|
|||
|
f83005bf70
|
|||
|
508f243d76
|
|||
|
f96467a1d6
|
|||
|
6607b06437
|
|||
|
2a06bfc989
|
|||
|
55ec191db5
|
|||
|
0c051c988c
|
|||
|
d9b3028ef1
|
|||
|
424ff991c4
|
|||
|
c0a37622a0
|
|||
|
5afec94f71
|
|||
|
cca68af2b7
|
|||
|
8dd63d69a2
|
|||
|
9042edf68a
|
|||
|
8217d052e9
|
|||
|
544a770c09
|
|||
| 30712738dc | |||
| 0915b23dc8 |
@@ -0,0 +1,76 @@
|
|||||||
|
---
|
||||||
|
id: TASK-336
|
||||||
|
title: Fix Hyprland fullscreen overlay downward offset
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 05:42'
|
||||||
|
updated_date: '2026-05-04 06:10'
|
||||||
|
labels:
|
||||||
|
- linux
|
||||||
|
- hyprland
|
||||||
|
- overlay
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- src/window-trackers/hyprland-tracker.ts
|
||||||
|
- src/core/services/overlay-window-bounds.ts
|
||||||
|
- src/main/runtime/linux-mpv-fullscreen-overlay-refresh.ts
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
SubMiner visible overlay is slightly below mpv when mpv is fullscreen on Linux Hyprland. Align overlay bounds with mpv fullscreen client/monitor bounds.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Hyprland fullscreen mpv overlay uses top-aligned geometry instead of inheriting a downward offset.
|
||||||
|
- [x] #2 Regression coverage captures the fullscreen Hyprland geometry case.
|
||||||
|
- [x] #3 Targeted tests pass.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Added follow-up Hyprland placement handling after the fullscreenClient geometry fix. SubMiner overlay/stats windows now get stable titles and, on Hyprland, are resolved from `hyprctl -j clients` by current PID/title, then set floating before bounds are applied. The stats overlay reapplies bounds after showing because Hyprland cannot see the hidden window before it is mapped.
|
||||||
|
|
||||||
|
2026-05-04 follow-up: offset remains after removing pinning. User reports stats modal still has a top gap from mpv in Hyprland fullscreen. Need to inspect the exact stats overlay CSS/window bounds after float-only placement.
|
||||||
|
|
||||||
|
2026-05-04 follow-up fix: stats CSS already had zero body margin, so the remaining gap points at native Hyprland placement after float-only handling. Added exact `movewindowpixel`/`resizewindowpixel` Hyprland dispatches using the same tracked mpv bounds passed to Electron.
|
||||||
|
|
||||||
|
2026-05-04 second follow-up: live `hyprctl -j clients` showed the SubMiner client was already full monitor size at `[0,0]`, so the remaining visible top strip was inside Electron's transparent stats surface rather than compositor geometry. Made the stats overlay BrowserWindow opaque with the stats base background. Also prevented page titles from overwriting the stable SubMiner overlay/stats titles used for Hyprland client matching.
|
||||||
|
|
||||||
|
2026-05-04 third follow-up: user confirmed native overlay placement is correct and the remaining gap is stats-page-specific. Made stats overlay mode paint an opaque full-viewport root/background and constrained the stats app to `h-screen` with an internal scrolling main pane, so the overlay page itself covers the mpv frame from y=0.
|
||||||
|
|
||||||
|
2026-05-04 fourth follow-up: live Hyprland data showed mpv and SubMiner shared the same outer geometry while stats content still rendered lower. Stats window placement now compensates for Electron/Wayland content insets using `getContentBounds()` versus `getBounds()`, then sends the adjusted outer bounds to Hyprland exact placement so the content area, not just the native surface, aligns to mpv.
|
||||||
|
|
||||||
|
2026-05-04 fifth follow-up: user confirmed the offset is Hyprland-fullscreen-only and not present while mpv is windowed. Added Hyprland `setprop` decoration cleanup during exact overlay placement (`rounding 0`, `border_size 0`, `no_shadow 1`, `no_blur 1`, `decorate 0`) because fullscreen mpv has square fullscreen edges while a floating SubMiner stats window can retain Hyprland floating-window decoration.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Treated Hyprland `fullscreenClient` as a fullscreen signal when resolving mpv overlay geometry.
|
||||||
|
- Added Hyprland window placement handling so SubMiner overlay/stats windows are set floating before bounds are applied.
|
||||||
|
- Added exact Hyprland move/resize dispatches so floating overlay/stats windows are force-aligned to the tracked mpv bounds.
|
||||||
|
- Gave overlay/stats windows stable titles for Hyprland client matching, and reapplied stats bounds after show.
|
||||||
|
- Locked overlay/stats window titles against page title changes and made the stats overlay window opaque so mpv cannot show through transparent Electron insets.
|
||||||
|
- Made the stats overlay page paint an opaque full-viewport background and added CSS regression coverage for overlay mode.
|
||||||
|
- Compensated stats overlay outer placement for Electron/Wayland content insets.
|
||||||
|
- Disabled Hyprland floating-window decoration for exact overlay placement over fullscreen mpv.
|
||||||
|
- Added regression coverage for the 28px fullscreen geometry shape and Hyprland placement dispatches.
|
||||||
|
- Added a changelog fragment for the overlay fix.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/core/services/hyprland-window-placement.test.ts src/core/services/overlay-window-config.test.ts src/core/services/stats-window.test.ts src/core/services/overlay-window-bounds.test.ts src/window-trackers/hyprland-tracker.test.ts`
|
||||||
|
- `bun run typecheck`
|
||||||
|
- `bun run changelog:lint`
|
||||||
|
- `bun run test:fast`
|
||||||
|
- `bun test stats/src/styles/globals.test.ts stats/src/lib/api-client.test.ts src/core/services/stats-window.test.ts`
|
||||||
|
- `bun run build:stats`
|
||||||
|
- `bun test src/core/services/stats-window.test.ts src/core/services/hyprland-window-placement.test.ts stats/src/styles/globals.test.ts`
|
||||||
|
- `bun test src/core/services/hyprland-window-placement.test.ts src/core/services/stats-window.test.ts stats/src/styles/globals.test.ts`
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
---
|
||||||
|
id: TASK-339
|
||||||
|
title: Stop pinning Hyprland overlay windows
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 06:07'
|
||||||
|
updated_date: '2026-05-04 06:09'
|
||||||
|
labels:
|
||||||
|
- linux
|
||||||
|
- hyprland
|
||||||
|
- overlay
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- src/core/services/hyprland-window-placement.ts
|
||||||
|
- src/core/services/overlay-window.ts
|
||||||
|
- src/core/services/stats-window.ts
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Recent Hyprland placement fix pins SubMiner overlay/stats windows, making them follow across workspaces instead of staying attached to mpv. Keep the float-for-bounds behavior, but never pin overlay windows.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Hyprland placement dispatches set floating state only and does not dispatch pin.
|
||||||
|
- [x] #2 Regression coverage proves pinned clients are unpinned or at least not re-pinned by SubMiner.
|
||||||
|
- [x] #3 Targeted tests and typecheck pass.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Changed Hyprland placement dispatch construction so unpinned overlay windows only get `setfloating`; pinned overlay windows get a single `pin` dispatch to toggle the bad prior pinned state off. This preserves floating placement for bounds while keeping overlay windows workspace-local with mpv.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Stopped re-pinning Hyprland overlay/stats windows during placement.
|
||||||
|
- Added cleanup behavior for previously pinned SubMiner windows by toggling pin only when Hyprland reports `pinned: true`.
|
||||||
|
- Updated regression coverage and added a changelog fragment.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/core/services/hyprland-window-placement.test.ts src/core/services/overlay-window-config.test.ts src/core/services/stats-window.test.ts src/core/services/overlay-window-bounds.test.ts src/window-trackers/hyprland-tracker.test.ts`
|
||||||
|
- `bun run typecheck`
|
||||||
|
- `bun run changelog:lint`
|
||||||
|
- `bun run test:fast`
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+27
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
id: TASK-304
|
||||||
|
title: Fix N+1 sentence boundary counting across Yomitan punctuation gaps
|
||||||
|
status: In Progress
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-04-26 05:33'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- tokenizer
|
||||||
|
- annotations
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
N+1 target selection should respect sentence-ending punctuation from the original subtitle text even when Yomitan token output omits punctuation tokens. Current behavior can treat multiple subtitle sentences as one token span and incorrectly satisfy the minimum content-token threshold.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [ ] #1 A subtitle like `てんめ!ふざけんなよ!` does not mark `ふざけん`/similar single-content-token second sentence as N+1 when the minimum sentence word count is 3.
|
||||||
|
- [ ] #2 N+1 sentence segmentation uses original subtitle text offsets or equivalent source-boundary data, not only punctuation tokens returned by Yomitan.
|
||||||
|
- [ ] #3 Existing annotation exclusion behavior for particles/grammar tokens remains unchanged.
|
||||||
|
- [ ] #4 Regression tests cover Yomitan-style token streams where punctuation is absent from the token list.
|
||||||
|
<!-- AC:END -->
|
||||||
@@ -0,0 +1,64 @@
|
|||||||
|
---
|
||||||
|
id: TASK-305
|
||||||
|
title: Use Yomitan word classes for subtitle token POS filtering
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-04-26 05:56'
|
||||||
|
updated_date: '2026-05-02 22:47'
|
||||||
|
labels:
|
||||||
|
- tokenizer
|
||||||
|
- yomitan
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Subtitle annotation filtering currently uses Yomitan token spans, then enriches those spans by running MeCab over the full normalized subtitle line. Add support for carrying Yomitan headword wordClasses from termsFind into SubMiner tokens so dictionary-backed tokens can provide coarse POS/tag metadata without vendored Yomitan changes. MeCab whole-line enrichment should remain a fallback/source of detailed POS data when Yomitan classes are absent.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Yomitan scanner tokens preserve matched headword wordClasses when termsFind returns them.
|
||||||
|
- [x] #2 Subtitle tokenization maps recognized Yomitan wordClasses to coarse PartOfSpeech/POS metadata before annotation filtering.
|
||||||
|
- [x] #3 Whole-line MeCab enrichment remains available for missing or more detailed POS metadata and does not break existing subtitle annotation behavior.
|
||||||
|
- [x] #4 Focused tokenizer tests cover wordClasses extraction and POS mapping.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add focused regression coverage for Yomitan scanner wordClasses payload and subtitle POS mapping.
|
||||||
|
2. Extend the app-owned Yomitan scanner payload to carry matched headword wordClasses when present.
|
||||||
|
3. Map recognized Yomitan wordClasses to SubMiner coarse PartOfSpeech/POS metadata before annotation filtering.
|
||||||
|
4. Keep MeCab whole-line enrichment as fallback/detail-fill for missing POS fields.
|
||||||
|
5. Run focused tokenizer tests and typecheck.
|
||||||
|
|
||||||
|
2026-05-02 review follow-up: inspect latest CodeRabbit review on PR #57, classify each finding as actionable/not actionable, patch scoped issues, run focused verification, then update final notes. User request to address/assess the review is the approval for this follow-up.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented app-only wordClasses extraction from termsFind results; no vendored Yomitan changes required. Recognized classes currently map prt, aux, v*, adj-i/adj-ix, adj-na, and noun-like classes to SubMiner POS metadata. MeCab enrichment now skips only tokens with complete pos1/pos2/pos3 and otherwise fills missing fields while preserving existing coarse pos1. Verification: bun test src/core/services/tokenizer/yomitan-parser-runtime.test.ts src/core/services/tokenizer.test.ts; bun run typecheck.
|
||||||
|
|
||||||
|
2026-05-02 CodeRabbit latest review assessment: only current actionable finding was in src/core/services/tokenizer/annotation-stage.test.ts, where a kana-only regression fixture used mixed-script/punctuation surface text. Earlier CodeRabbit findings in this PR were already marked addressed by prior commits. Patched the fixture to use pure-kana surface/headword and renamed the test to match the exercised behavior. Verification: bun test src/core/services/tokenizer/annotation-stage.test.ts; bun run typecheck.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Implemented app-only Yomitan wordClasses support for subtitle token annotation filtering. The scanner carries matched headword wordClasses from termsFind results, tokenizer maps recognized classes into SubMiner coarse POS metadata before annotation, and MeCab whole-line enrichment continues to fill missing detailed POS fields without requiring vendored Yomitan changes.
|
||||||
|
|
||||||
|
2026-05-02 CodeRabbit follow-up:
|
||||||
|
- Assessed the latest CodeRabbit review on PR #57. Only one new actionable finding remained: the kana-only N+1 regression test used a mixed/punctuated surface.
|
||||||
|
- Updated the fixture in src/core/services/tokenizer/annotation-stage.test.ts to use a pure-kana unknown target and renamed the test accordingly.
|
||||||
|
|
||||||
|
Tests run:
|
||||||
|
- bun test src/core/services/tokenizer/annotation-stage.test.ts
|
||||||
|
- bun run typecheck
|
||||||
|
|
||||||
|
Note: earlier CodeRabbit findings on this PR were already marked addressed in prior commits; no further latest-review issues were left unresolved in this pass.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,33 @@
|
|||||||
|
---
|
||||||
|
id: TASK-306
|
||||||
|
title: Fix Hyprland fullscreen overlay geometry and hover pause
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-04-27 01:44'
|
||||||
|
labels:
|
||||||
|
- linux
|
||||||
|
- hyprland
|
||||||
|
- overlay
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
|
||||||
|
Overlay should track mpv geometry through Hyprland fullscreen transitions, stay above fullscreen video, and keep primary subtitle hover pause working after fullscreen/toggle cycles.
|
||||||
|
|
||||||
|
Implemented by observing mpv fullscreen property changes in addition to Hyprland geometry events, then refreshing visible overlay bounds/layering on Linux.
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
|
||||||
|
- [x] #1 Hyprland tracker reacts to fullscreen/window state changes with updated geometry.
|
||||||
|
- [x] #2 Visible overlay is re-layered above mpv after Hyprland fullscreen geometry updates.
|
||||||
|
- [x] #3 Primary subtitle hover pause remains active after overlay geometry changes or visible overlay toggle cycles.
|
||||||
|
<!-- AC:END -->
|
||||||
@@ -0,0 +1,58 @@
|
|||||||
|
---
|
||||||
|
id: TASK-307
|
||||||
|
title: Exclude kana-only words from N+1 subtitle targets
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-04-27 01:52'
|
||||||
|
updated_date: '2026-04-27 01:57'
|
||||||
|
labels:
|
||||||
|
- tokenizer
|
||||||
|
- annotations
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Subtitle N+1 annotation is over-targeting kana-only or hiragana/katakana tokens that collapse to dictionary words. Adjust targeting so kana-only tokens are not selected as N+1 candidates, while preserving tokenization/hover behavior and other annotation metadata where existing filters allow it.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Kana-only subtitle tokens are not marked as N+1 targets.
|
||||||
|
- [x] #2 Kanji or mixed lexical tokens can still be marked as N+1 targets when they are the single unknown candidate in a sentence.
|
||||||
|
- [x] #3 Regression coverage demonstrates the kana-only N+1 exclusion.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a failing regression in `src/core/services/tokenizer.test.ts` showing a kana-only Yomitan token is not selected as the single N+1 target, while a mixed lexical token in the same style still can be targeted.
|
||||||
|
2. Implement the smallest filter in `src/token-merger.ts`: N+1 candidate selection rejects tokens whose surface is entirely kana; word-count behavior remains governed by existing annotation/POS filters.
|
||||||
|
3. Run the focused tokenizer tests, then update task acceptance criteria/final summary.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented a surface-level kana-only guard in N+1 candidate selection. Kept existing word-count/POS filtering behavior intact; updated tokenizer and annotation-stage expectations where old tests intentionally allowed kana-only N+1 targets.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Added kana-only surface detection to `isNPlusOneCandidateToken` so hiragana/katakana-only subtitle tokens are not selected as N+1 targets.
|
||||||
|
- Added/updated tokenizer and annotation-stage regressions for kana-only targets while preserving non-kana N+1 behavior.
|
||||||
|
- Added changelog fragment `changes/307-kana-nplusone-targets.md`.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/core/services/tokenizer.test.ts --test-name-pattern "kana-only N\+1"` failed before the fix with `true !== false`.
|
||||||
|
- `bun test src/core/services/tokenizer/annotation-stage.test.ts src/core/services/tokenizer.test.ts` passed.
|
||||||
|
- `bun run typecheck` passed.
|
||||||
|
- `bun run test:fast` passed.
|
||||||
|
- `bun run changelog:lint` passed.
|
||||||
|
- `bunx prettier --check src/core/services/tokenizer.test.ts src/core/services/tokenizer/annotation-stage.test.ts src/token-merger.ts changes/307-kana-nplusone-targets.md` passed.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,54 @@
|
|||||||
|
---
|
||||||
|
id: TASK-308
|
||||||
|
title: Restore persistent JLPT subtitle underlines
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-04-27 02:03'
|
||||||
|
updated_date: '2026-04-27 02:07'
|
||||||
|
labels:
|
||||||
|
- overlay
|
||||||
|
- jlpt
|
||||||
|
- renderer
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
JLPT tagging currently exposes the JLPT level on hover, but the persistent subtitle underline is missing. When JLPT annotation is enabled and a rendered subtitle token has a JLPT level, users should see the configured JLPT color underline without needing to hover.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 JLPT-tagged subtitle tokens render a persistent underline for N1-N5 levels when JLPT tagging is enabled.
|
||||||
|
- [x] #2 Hover and keyboard-selected JLPT labels continue to appear for tagged tokens.
|
||||||
|
- [x] #3 Higher-priority annotation colors such as known words, N+1, names, and frequency styling are not overridden by JLPT text color.
|
||||||
|
- [x] #4 Regression coverage verifies the CSS contract for persistent JLPT underlines.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a focused renderer CSS regression asserting each `word-jlpt-n*` class provides persistent underline decoration while preserving existing typography constraints.
|
||||||
|
2. Run the focused renderer test to confirm the regression fails before production changes.
|
||||||
|
3. Restore underline CSS for JLPT classes without broadening JLPT text-color precedence over known/N+1/name/frequency tokens.
|
||||||
|
4. Re-run the focused renderer test and update acceptance criteria/task notes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Verified red/green regression: tightened `src/renderer/subtitle-render.test.ts` first failed because base `word-jlpt-n*` selectors had no underline decoration, then passed after moving JLPT underline decoration to unconditional base selectors while leaving JLPT text color priority-scoped.
|
||||||
|
|
||||||
|
Checks: `bun test src/renderer/subtitle-render.test.ts`; `bun run changelog:lint`; `bun run typecheck`.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Restored persistent JLPT subtitle underlines by adding underline decoration to each base `word-jlpt-n*` renderer CSS class. JLPT text color remains in the existing priority-scoped selectors, so known/N+1/name/frequency coloring is not overridden while the underline still appears on any JLPT-tagged token.
|
||||||
|
|
||||||
|
Updated renderer CSS regression coverage to assert underline decoration for N1-N5 and added a fixed changelog fragment. Verified with `bun test src/renderer/subtitle-render.test.ts`, `bun run changelog:lint`, and `bun run typecheck`.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+57
@@ -0,0 +1,57 @@
|
|||||||
|
---
|
||||||
|
id: TASK-309
|
||||||
|
title: Accept modified follow-up digits for multi-line sentence mining
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@codex'
|
||||||
|
created_date: '2026-04-27 20:06'
|
||||||
|
updated_date: '2026-04-27 20:15'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- linux
|
||||||
|
- shortcuts
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
|
||||||
|
On Linux, `Ctrl+Shift+S` starts multi-line sentence-card mining, but the follow-up digit is not accepted and the prompt times out. Restore reliable digit capture for the multi-mine flow, including the common case where the original shortcut modifiers are still held briefly while pressing the digit.
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
|
||||||
|
- [x] #1 `Ctrl+Shift+S` followed by a number-row digit creates a counted `mineSentenceMultiple` request instead of timing out.
|
||||||
|
- [x] #2 Follow-up digit capture works when the user has not fully released `Ctrl`/`Shift` after the starter shortcut.
|
||||||
|
- [x] #3 Regression coverage includes renderer session bindings and mpv plugin numeric selection.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
|
||||||
|
Backlog MCP unavailable in this session, so this task is tracked via repo-local backlog files.
|
||||||
|
|
||||||
|
Implemented renderer digit extraction from `KeyboardEvent.code` for pending numeric selection, so shifted number-row events such as `Ctrl+Shift+Digit3` still dispatch count `3`. Updated the mpv plugin session-binding numeric selector to register bare digits plus the starter shortcut modifier combinations, so plugin-owned `Ctrl+Shift+S` can accept a follow-up digit before the modifiers are fully released.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
|
||||||
|
- `bun test src/renderer/handlers/keyboard.test.ts src/core/services/overlay-shortcut-handler.test.ts src/core/services/overlay-window.test.ts`
|
||||||
|
- `bun run test:plugin:src`
|
||||||
|
- `bun run changelog:lint`
|
||||||
|
- `bun x prettier --check src/renderer/handlers/keyboard.ts src/renderer/handlers/keyboard.test.ts package.json 'changes/309-multi-mine-modified-digits.md' 'backlog/tasks/task-309 - Accept-modified-follow-up-digits-for-multi-line-sentence-mining.md'`
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
|
||||||
|
Restored multi-line sentence-card digit capture for the case where `Ctrl`/`Shift` are still held after `Ctrl+Shift+S`. The renderer now accepts digits by physical `Digit1`-`Digit9`/`Numpad1`-`Numpad9` code during pending numeric selection, and the mpv plugin registers the matching modified digit bindings for session-binding numeric prompts.
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,58 @@
|
|||||||
|
---
|
||||||
|
id: TASK-310
|
||||||
|
title: Suppress N+1 highlight for kana-only candidate sentences
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-04-28 06:55'
|
||||||
|
updated_date: '2026-04-28 07:04'
|
||||||
|
labels:
|
||||||
|
- tokenizer
|
||||||
|
- n+1
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Reduce noisy N+1 subtitle annotations when the only unknown candidates in a sentence are kana-only hiragana or katakana words, such as mostly-kana subtitle lines where highlighting a particle/helper-like token is low value.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 N+1 annotation does not mark a kana-only unknown target when all N+1 candidates in the sentence are kana-only.
|
||||||
|
- [x] #2 N+1 annotation continues to mark kanji or mixed-script unknown targets in otherwise eligible sentences.
|
||||||
|
- [x] #3 A focused regression test covers the kana-only candidate case.
|
||||||
|
- [x] #4 N+1 minimum sentence word count excludes tokens stripped by the subtitle annotation filter, so filtered grammar/noise tokens cannot satisfy minSentenceWords.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Keep the existing N+1 target eligibility guard: kana-only subtitle surfaces do not become N+1 targets.
|
||||||
|
2. Add a focused regression in src/core/services/tokenizer/annotation-stage.test.ts proving annotation-filtered tokens do not count toward ankiConnect.nPlusOne.minSentenceWords.
|
||||||
|
3. Verify the new regression fails before code changes.
|
||||||
|
4. Patch src/token-merger.ts so the N+1 minimum sentence word count uses the same subtitle-annotation eligibility filter as annotation rendering, excluding filtered particles/auxiliaries/noise from the count.
|
||||||
|
5. Re-run focused tokenizer tests, then update TASK-310 acceptance criteria and final notes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Initial context: current token-merger has an existing surface-level kana-only guard in isNPlusOneCandidateToken, added in commit 9e4ad907. Need to decide whether to broaden behavior to lookup/headword forms or verify current behavior only.
|
||||||
|
|
||||||
|
Implemented by treating kana-only N+1 candidates as kana-only even when their token surface includes surrounding subtitle punctuation such as ellipsis or dashes. Focused regression was red before the token-merger change: スイッチ… was marked true, then passed after the guard update. test:env initially hit an unrelated immersion-tracker active_days timing/order failure and Bun follow-on loader error; the failing test passed in isolation and the full test:env rerun passed.
|
||||||
|
|
||||||
|
Reopened for follow-up scope: minSentenceWords must count annotation-eligible tokens only, not tokens stripped from annotation metadata.
|
||||||
|
|
||||||
|
Implemented follow-up minSentenceWords behavior: unknown tokens filtered from N+1 targeting no longer contribute to sentence length; known eligible tokens and true N+1 candidates still count.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Changed N+1 sentence-length counting so minSentenceWords only counts known eligible words and actual N+1 target candidates. Unknown tokens filtered from N+1 targeting, including kana-only unknowns, no longer pad a sentence into eligibility. Existing annotation-filtered particles/auxiliaries remain excluded. Added regression coverage for the filtered unknown padding case while preserving kanji/mixed-script target behavior.
|
||||||
|
|
||||||
|
Verification: new regression failed before implementation; `bun test src/core/services/tokenizer/annotation-stage.test.ts -t "N\\+1"` pass; full `bun test src/core/services/tokenizer/annotation-stage.test.ts` pass; `bun test src/core/services/tokenizer.test.ts -t "N\\+1"` pass; `bun run typecheck` pass.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+43
@@ -0,0 +1,43 @@
|
|||||||
|
---
|
||||||
|
id: TASK-311
|
||||||
|
title: Suppress auxiliary inflection fragments from subtitle annotations
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-02 09:07'
|
||||||
|
updated_date: '2026-05-02 09:10'
|
||||||
|
labels:
|
||||||
|
- tokenizer
|
||||||
|
- annotations
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Suppress standalone Japanese auxiliary/inflection subtitle fragments such as `れる` and `れた` from frequency/JLPT/N+1/known annotation styling while keeping lexical verbs such as `くれ` / `くれる` annotatable. Tokens must remain hoverable; only annotation metadata should be stripped.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 `れる` and `れた`-style standalone helper fragments render as plain hoverable subtitle tokens.
|
||||||
|
- [x] #2 Lexical verbs like `くれ` / `くれる` remain eligible for annotation.
|
||||||
|
- [x] #3 Regression tests cover unit filter behavior and tokenizer integration.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented with TDD. Added failing coverage first for standalone `れる`/`れた` auxiliary fragments and a lexical `くれ`/`くれる` guard. Updated the shared subtitle annotation filter to strip annotation metadata for kana-only auxiliary inflection fragments identified by MeCab POS (`助動詞` only, or `動詞/接尾` with optional trailing `助動詞`) while preserving lexical `くれ` as `くれる` when tagged `動詞/自立`. Added tokenizer integration coverage for `れた` and neighboring lexical N+1 behavior.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Suppressed annotation metadata for standalone auxiliary inflection fragments such as `れる` and `れた` in subtitle tokens, leaving them hoverable but plain. Preserved lexical `くれ` -> `くれる` verb metadata when MeCab tags it as `動詞/自立`.
|
||||||
|
|
||||||
|
Added unit and tokenizer regression coverage, plus a release fragment in `changes/311-auxiliary-inflection-annotation-filter.md`.
|
||||||
|
|
||||||
|
Validation: targeted annotation/tokenizer tests passed; `bun run typecheck` passed; `bun run changelog:lint` passed. `bun run test:fast` was attempted twice and failed in unrelated `src/core/services/subsync.test.ts` cross-file state (`window.electronAPI` undefined), while `bun test src/core/services/subsync.test.ts` passes by itself.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
id: TASK-312
|
||||||
|
title: Suppress ja-nai explanatory ending subtitle annotations
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-02 09:55'
|
||||||
|
updated_date: '2026-05-02 10:03'
|
||||||
|
labels:
|
||||||
|
- tokenizer
|
||||||
|
- annotations
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Suppress subtitle annotation styling for grammar-only explanatory endings like `じゃない` and `じゃないですか` while preserving nearby lexical content annotations.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 `じゃない` and `じゃないですか`-style endings render as plain hoverable subtitle tokens.
|
||||||
|
- [x] #2 The reported phrase `みたいなのあるじゃないですか` does not annotate `じゃない`/`じゃないですか` as lexical/frequency content.
|
||||||
|
- [x] #3 Regression tests cover unit filter behavior and tokenizer integration without suppressing lexical content tokens.
|
||||||
|
- [x] #4 Standalone polite copula endings such as `です` / `ですよ` render as plain hoverable subtitle tokens even if POS metadata is missing or too lexical.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Added failing coverage first for `じゃない` / `じゃないですか` and `ですよ` leaking annotation metadata when POS metadata is missing or too lexical. Implemented term-family exclusions in the shared subtitle annotation filter for the `じゃない` explanatory family and polite copula suffix endings (`ですか`, `ですね`, `ですよ`, `ですな`). Kept bare `です` term-only behavior unchanged to preserve existing no-POS frequency tests; POS-tagged `です` is already stripped by the grammar POS exclusion path.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Suppressed subtitle annotation metadata for grammar-only endings like `じゃないですか` and `ですよ`, while preserving nearby lexical content annotations. Added unit and tokenizer regression coverage for the reported `みたいなのあるじゃないですか` and `感じですよ` shapes, plus changelog fragment `changes/312-grammar-ending-annotation-filter.md`.
|
||||||
|
|
||||||
|
Validation: `bun test src/core/services/tokenizer/annotation-stage.test.ts`; `bun test src/core/services/tokenizer.test.ts`; `bun run typecheck`; `bun run changelog:lint`; `git diff --check`.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+67
@@ -0,0 +1,67 @@
|
|||||||
|
---
|
||||||
|
id: TASK-315
|
||||||
|
title: Suppress annotations for standalone じゃない and です ending tokens
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-03 00:02'
|
||||||
|
updated_date: '2026-05-03 06:05'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- tokenizer
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Standalone `じゃない` grammar ending tokens should not display or persist subtitle annotations even if a dictionary assigns a rank or JLPT/known match. User observed `じゃない` still being marked frequent in overlay after tokenization produced it as a dictionary word.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 `じゃない` and `です` ending tokens have known-word, N+1, frequency, and JLPT annotation metadata cleared in subtitle annotation output.
|
||||||
|
- [x] #2 Common polite/question variants such as `じゃないですか` and `ですよ` remain excluded when tokenized as a single ending token.
|
||||||
|
- [x] #3 Regression coverage proves same-line Yomitan segments split content from trailing grammar endings so the content word can be annotated without coloring the ending.
|
||||||
|
- [x] #4 Auxiliary-only helper spans such as `てく` + `れた` in `ベアトリスがいてくれたから` have known-word, N+1, frequency, and JLPT annotation metadata cleared.
|
||||||
|
- [x] #5 Hard-coded grammar-ending phrase permutations are replaced by shared pattern matching, with parser selection and subtitle annotation filtering using the same grammar-ending classifier.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a focused regression for `ベアトリスがいてくれたから` where Yomitan tokens include auxiliary-only `てく` and `れた` with pre-ranked/known/JLPT metadata candidates.
|
||||||
|
2. Run the targeted test to verify the regression fails before production changes.
|
||||||
|
3. Patch the shared subtitle annotation filter so kana-only auxiliary helper spans made only of grammar POS components are excluded while preserving lexical content tokens.
|
||||||
|
4. Re-run targeted tokenizer/annotation tests, then run SubMiner change verification classifier/verifier for the touched files.
|
||||||
|
5. Update TASK-315 acceptance criteria, notes, and final summary with commands and outcomes.
|
||||||
|
|
||||||
|
Replace explicit standalone grammar-ending permutations with a compact shared matcher used by parser selection and annotation filtering.
|
||||||
|
|
||||||
|
Add regression tests first for non-enumerated polite copula / ja-nai variants so the matcher behavior is proven, then refactor implementation and verify targeted lanes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented as one focused tokenizer fix. Parser selection now splits dictionary-backed same-line grammar ending segments (`です`, `じゃない*`) from preceding content so annotation styling can apply only to the content token. Shared subtitle annotation filtering now treats bare `です` like the existing `ですか/ですよ/...` copula endings.
|
||||||
|
|
||||||
|
2026-05-03: Reopened for approved add-on covering auxiliary-only `てく` + `れた` helper highlighting report.
|
||||||
|
|
||||||
|
2026-05-03: Added regression coverage for `ベアトリスがいてくれたから` where Yomitan emits `てく` + `れた` and MeCab enrichment tags `てく` as `助詞|動詞` / `接続助詞|非自立`. The regression initially failed because `てく` kept `isKnown: true` and `jlptLevel: N4`. Added a shared-filter helper for kana-only particle+non-independent-verb helper spans, preserving lexical `自立` verbs. Verification: `bun test src/core/services/tokenizer/annotation-stage.test.ts`, `bun test src/core/services/tokenizer.test.ts`, `bun test src/core/services/tokenizer/parser-selection-stage.test.ts`, `bun x prettier --check ...`, and `bun run typecheck` passed. SubMiner verifier core lane passed typecheck but `bun run test:fast` failed on unrelated existing cross-suite issues: `window.electronAPI` undefined in `src/renderer/handlers/keyboard.ts` during `src/core/services/subsync.test.ts`, followed by Bun `node:test` nested-test cascade.
|
||||||
|
|
||||||
|
2026-05-03: Reopened for follow-up requested by user: remove hard-coded standalone grammar-ending permutation list and lean on pattern/POS filtering where possible.
|
||||||
|
|
||||||
|
2026-05-03: Added shared `grammar-ending.ts` matcher for polite copula, negative copula, and explanatory endings. Parser selection now uses the standalone-ending matcher instead of `STANDALONE_GRAMMAR_ENDINGS`. Shared subtitle filter now uses the same grammar classifier instead of generated phrase sets. Removed stale duplicate subtitle-exclusion helpers from `annotation-stage.ts`; annotation-stage continues to delegate subtitle exclusion to the shared filter. Verification passed: targeted tokenizer/parser/annotation tests, Prettier check, `bun run typecheck`, `bun run test:fast`, `bun run test:env`, `bun run build`, and `bun run test:smoke:dist`. `bun run changelog:lint` remains blocked by pre-existing malformed fragment `changes/319-interjection-annotation-filter.md`.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Replaced grammar-ending phrase permutations with shared pattern matching. `parser-selection-stage.ts` now splits standalone grammar endings through `grammar-ending.ts` instead of `STANDALONE_GRAMMAR_ENDINGS`; `subtitle-annotation-filter.ts` uses the same classifier for polite copula, negative copula, and explanatory endings instead of generated exact phrase sets.
|
||||||
|
|
||||||
|
Kept exclusion ownership cleaner: subtitle annotation exclusion remains in the shared filter, while `annotation-stage.ts` no longer carries stale duplicate subtitle-exclusion constants/helpers. Added regressions for pattern coverage including `ではないですか` splitting and no-POS grammar-ending annotation clearing.
|
||||||
|
|
||||||
|
Verification passed: targeted tokenizer/parser/annotation tests, Prettier check, `bun run typecheck`, `bun run test:fast`, `bun run test:env`, `bun run build`, and `bun run test:smoke:dist`. `bun run changelog:lint` is blocked by pre-existing malformed `changes/319-interjection-annotation-filter.md`; new fragment `changes/321-grammar-ending-pattern-filter.md` uses the current metadata format.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+68
@@ -0,0 +1,68 @@
|
|||||||
|
---
|
||||||
|
id: TASK-316
|
||||||
|
title: Fix macOS launcher playback exit with background stats daemon
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@Codex'
|
||||||
|
created_date: '2026-05-03 00:32'
|
||||||
|
updated_date: '2026-05-03 00:36'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- macos
|
||||||
|
- mpv
|
||||||
|
- stats
|
||||||
|
- runtime
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Launching a video on macOS when SubMiner is not already running should not leave the regular SubMiner app/tray alive after mpv closes. A separately running background stats daemon must remain non-blocking and must not be used as a foreground app dependency during playback startup/shutdown.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Closing a launcher/plugin-managed mpv session exits the launcher-started regular SubMiner app/tray after mpv closes.
|
||||||
|
- [x] #2 Explicit background/no-argument app launches still remain alive as before.
|
||||||
|
- [x] #3 A live background stats daemon is ignored by normal in-app stats server routing during regular app startup/playback, so the regular app never depends on or connects to that background daemon.
|
||||||
|
- [x] #4 Regression coverage demonstrates the managed playback shutdown and stats-daemon isolation behavior.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add failing regressions first: stats routing should ignore a live foreign background daemon for normal app URL/server startup, and managed playback disconnect should request app quit directly without reconnecting or depending on overlay/youtube disconnect guards.
|
||||||
|
2. Implement the narrow runtime changes in `src/main/runtime/stats-server-routing.ts` and, if needed, mpv disconnect plumbing in `src/core/services/mpv.ts` / event deps.
|
||||||
|
3. Preserve explicit persistent background/no-arg behavior by keeping `--managed-playback` as the only playback-exit marker.
|
||||||
|
4. Run focused tests (`stats-server-routing`, mpv client/protocol/event tests), then typecheck if focused checks pass.
|
||||||
|
5. Update changelog and task acceptance/final notes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented regular app stats routing isolation from live background daemon state and explicit managed-playback quit-on-disconnect wiring in main mpv event deps. Existing `MpvIpcClient` socket-close managed playback quit path remains covered.
|
||||||
|
|
||||||
|
`bun run test:fast` was attempted after focused verification. It failed in the broad `test:core:src` lane with Bun/node:test nested-test runner errors across many unrelated files and one transient subsync renderer API failure; rerunning the concrete subsync failure alone passed. Focused runtime tests, typecheck, and changelog lint remain green.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Regular app stats server routing no longer returns or depends on a live background daemon URL; it validates/cleans state, then uses the local app stats server path.
|
||||||
|
- Managed playback is now explicitly treated as a quit-on-disconnect launch mode in main mpv event deps, in addition to the existing mpv socket-close quit request.
|
||||||
|
- Added regressions for background daemon isolation and managed playback quit-on-disconnect classification.
|
||||||
|
- Added changelog fragment `changes/316-macos-playback-stats-daemon.md`.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/main/runtime/stats-server-routing.test.ts src/core/services/mpv.test.ts src/core/services/mpv-protocol.test.ts src/main/runtime/mpv-client-event-bindings.test.ts src/main/runtime/mpv-main-event-bindings.test.ts src/main/runtime/mpv-main-event-main-deps.test.ts`
|
||||||
|
- `bun run typecheck`
|
||||||
|
- `bun run changelog:lint`
|
||||||
|
- `bun test src/core/services/subsync.test.ts --test-name-pattern "deterministic _retimed"`
|
||||||
|
|
||||||
|
Blocked broader gate:
|
||||||
|
- `bun run test:fast` failed in `test:core:src` with Bun/node:test nested-test runner errors across unrelated files; the concrete subsync failure from that run passed when isolated.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
---
|
||||||
|
id: TASK-318
|
||||||
|
title: Keep JLPT underline color fixed after lookup selection
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@Codex'
|
||||||
|
created_date: '2026-05-03 03:17'
|
||||||
|
updated_date: '2026-05-03 03:19'
|
||||||
|
labels:
|
||||||
|
- overlay
|
||||||
|
- jlpt
|
||||||
|
- renderer
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Looking up a subtitle token can leave browser/Yomitan selection styling active. If that token has a JLPT class and another annotation class, the underline must remain the JLPT level color because underline color represents static JLPT classification, not the currently active annotation or lookup state.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 JLPT subtitle underlines retain their configured N1-N5 color after lookup/selection styling is applied.
|
||||||
|
- [x] #2 JLPT tokens that also have known, N+1, name, or frequency annotation classes keep their annotation text color behavior without changing the JLPT underline color.
|
||||||
|
- [x] #3 Renderer regression coverage verifies the CSS contract for the combined JLPT plus annotation case.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a focused CSS regression in `src/renderer/subtitle-render.test.ts` for JLPT tokens combined with higher-priority annotation classes and lookup/selection styling.
|
||||||
|
2. Run the focused renderer test and confirm it fails because selection rules do not lock `text-decoration-color`.
|
||||||
|
3. Update `src/renderer/style.css` to explicitly preserve JLPT underline decoration color in lookup/selection state selectors without changing text color priority.
|
||||||
|
4. Re-run the focused renderer test, then run the smallest relevant verification gate.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Verified TDD red/green for renderer CSS contract: `bun test src/renderer/subtitle-render.test.ts` first failed because `word-jlpt-n1::selection` lock was missing, then passed after adding explicit JLPT `text-decoration-color` selection rules. Also ran `bun run changelog:lint` and `bun run typecheck` successfully.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed JLPT subtitle underline color drift after dictionary lookup/selection by adding explicit `::selection` decoration-color locks for N1-N5 token classes in `src/renderer/style.css`. This preserves the JLPT underline as static classification while leaving known/N+1/name/frequency text color priority intact.
|
||||||
|
|
||||||
|
Added renderer CSS regression coverage for the JLPT selection lock and a user-visible changelog fragment.
|
||||||
|
|
||||||
|
Checks: `bun test src/renderer/subtitle-render.test.ts`; `bun run changelog:lint`; `bun run typecheck`.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+58
@@ -0,0 +1,58 @@
|
|||||||
|
---
|
||||||
|
id: TASK-319
|
||||||
|
title: Suppress annotations for expressive interjection subtitles
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-05-03 03:18'
|
||||||
|
updated_date: '2026-05-03 03:20'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- subtitle-annotations
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- src/core/services/tokenizer/subtitle-annotation-filter.ts
|
||||||
|
- src/core/services/tokenizer/annotation-stage.test.ts
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Interjection-only subtitle tokens such as ハァ and はっ should remain hoverable as tokens but must not receive known, N+1, frequency, or JLPT annotation styling. Current behavior can still annotate these forms when dictionary/POS metadata does not trip the existing exclusion gate.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Standalone ハァ/はっ-style interjection tokens have annotation metadata cleared even when dictionary metadata exists.
|
||||||
|
- [x] #2 Filtering remains scoped so content-bearing non-interjection tokens still receive annotations.
|
||||||
|
- [x] #3 Regression coverage exercises the reported subtitle pattern: ハァ… / (ガーフィール)はっ!
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add failing regression coverage around annotation filtering for the reported interjection forms, including katakana ハァ and small-tsu はっ with surrounding subtitle punctuation/name text.
|
||||||
|
2. Tighten the shared subtitle annotation exclusion gate so expressive kana interjections clear annotation metadata without relying only on MeCab pos1=感動詞.
|
||||||
|
3. Run the focused tokenizer/annotation tests, then update acceptance criteria and notes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented via shared subtitle annotation exclusion term normalization: added はぁ so katakana ハァ normalizes into the existing term gate. Existing small-tsu kana SFX logic already covers はっ. Regression confirms both reported forms clear known/N+1/frequency/JLPT metadata while a normal noun keeps frequency annotation.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Added a regression for the reported subtitle pattern ハァ… / (ガーフィール)はっ!, with annotation metadata present on both interjection tokens.
|
||||||
|
- Extended the shared subtitle annotation exclusion term set so ハァ normalizes to はぁ and is stripped of annotation styling. Existing はっ handling remains covered by small-tsu kana SFX filtering.
|
||||||
|
- Added a change fragment for the user-visible bug fix.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- bun test src/core/services/tokenizer/annotation-stage.test.ts
|
||||||
|
- bun test src/core/services/tokenizer/annotation-stage.test.ts src/core/services/tokenizer.test.ts src/renderer/subtitle-render.test.ts
|
||||||
|
- bun run typecheck
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+58
@@ -0,0 +1,58 @@
|
|||||||
|
---
|
||||||
|
id: TASK-320
|
||||||
|
title: Refresh current subtitle known-word highlight after successful mining
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-05-03 03:22'
|
||||||
|
updated_date: '2026-05-03 03:29'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- anki
|
||||||
|
- subtitle-annotations
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
After a sentence card is mined successfully, the mined word is added to the known-word cache and future subtitle appearances render as known. The currently displayed subtitle must also be refreshed immediately so the mined word renders in the known-word color without waiting for a later cue.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Successful sentence-card mining refreshes the current displayed subtitle so newly mined known words render immediately.
|
||||||
|
- [x] #2 Unsuccessful/no-op mining does not refresh the current subtitle.
|
||||||
|
- [x] #3 Regression coverage verifies the successful and unsuccessful mining paths.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a regression test around AnkiIntegration known-word cache appends: when mined note info changes known words, a callback fires.
|
||||||
|
2. Make KnownWordCacheManager.appendFromNoteInfo report whether it changed the immediate known-word cache.
|
||||||
|
3. Add an AnkiIntegration known-word-cache-updated callback and invoke it after successful immediate append.
|
||||||
|
4. Wire main process callback to subtitleProcessingController.refreshCurrentSubtitle(appState.currentSubText), forcing active-line retokenization after popup/proxy or local mining updates the known-word cache.
|
||||||
|
5. Add a changelog fragment and run targeted tests plus typecheck.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented generic known-word-cache update notification instead of shortcut-only refresh. KnownWordCacheManager.appendFromNoteInfo now returns whether in-memory known words changed; AnkiIntegration notifies a callback after successful append. Main process wires that callback to subtitleProcessingController.refreshCurrentSubtitle(appState.currentSubText), forcing retokenization without using stale prefetch/cache data. Added regression coverage in anki-integration.test.ts.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Added a known-word-cache update callback on AnkiIntegration and wired it in the main process to refresh the current subtitle after mined note info changes known words.
|
||||||
|
- Made KnownWordCacheManager.appendFromNoteInfo report whether it changed the known-word cache, so refresh only happens after an actual immediate known-word append.
|
||||||
|
- Added regression coverage proving mined note info updates known words and emits the update notification.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- bun test src/anki-integration.test.ts src/anki-integration/known-word-cache.test.ts src/main/runtime/anki-actions.test.ts src/main/runtime/anki-actions-main-deps.test.ts
|
||||||
|
- bun run typecheck
|
||||||
|
- bun run changelog:lint currently blocked by pre-existing invalid metadata in changes/319-interjection-annotation-filter.md.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,63 @@
|
|||||||
|
---
|
||||||
|
id: TASK-321
|
||||||
|
title: Preserve word audio during manual clipboard card updates
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@Codex'
|
||||||
|
created_date: '2026-05-03 06:22'
|
||||||
|
updated_date: '2026-05-03 06:23'
|
||||||
|
labels:
|
||||||
|
- anki
|
||||||
|
- mining
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Manual Ctrl+Shift+C/Ctrl+V card updates on already-mined cards should refresh the sentence content and generated sentence media without removing or replacing the existing word/expression audio. The word is unchanged in this flow, so the configured word audio field must be left untouched while sentence audio retains the forced-overwrite behavior from TASK-299.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Manual clipboard subtitle update replaces the resolved sentence audio field with newly generated sentence audio.
|
||||||
|
- [x] #2 Manual clipboard subtitle update does not include the configured word/expression audio field in Anki field updates.
|
||||||
|
- [x] #3 Animated image generation still uses the existing word audio duration for lead-in sync when configured.
|
||||||
|
- [x] #4 A regression test covers preserving word/expression audio during manual clipboard update.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Update the focused manual clipboard card update regression so generated audio is written only to the resolved sentence audio field and the configured word/expression audio field is absent from updateNoteFields payloads.
|
||||||
|
2. Run the focused test and confirm it fails for the existing TASK-299 behavior.
|
||||||
|
3. Change CardCreationService.updateLastAddedFromClipboard to stop merging/updating expression audio while preserving forced overwrite for sentence audio.
|
||||||
|
4. Run the focused test; then run adjacent Anki card-creation tests if the focused gate passes.
|
||||||
|
5. Update task acceptance criteria/final notes with verification results.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented narrow manual clipboard update change in CardCreationService.updateLastAddedFromClipboard: generated audio now force-overwrites only the resolved sentence audio field and no longer writes the configured word/expression audio field. Animated AVIF lead-in still runs from the original note info before image generation, preserving existing word-audio sync behavior.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Manual Ctrl+Shift+C/Ctrl+V card updates now leave the configured word/expression audio field untouched while force-replacing the resolved sentence audio field.
|
||||||
|
- Updated the regression test to assert the Anki update payload omits ExpressionAudio and only merges SentenceAudio with forced overwrite.
|
||||||
|
- Updated docs-site behavior notes and added a changelog fragment for the sentence-only manual audio replacement behavior.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- bun test src/anki-integration/card-creation-manual-update.test.ts src/anki-integration/card-creation.test.ts src/anki-integration/animated-image-sync.test.ts
|
||||||
|
- bun run typecheck
|
||||||
|
- bun run docs:test
|
||||||
|
- bun run docs:build
|
||||||
|
- git diff --check -- src/anki-integration/card-creation.ts src/anki-integration/card-creation-manual-update.test.ts docs-site/mining-workflow.md docs-site/anki-integration.md docs-site/configuration.md changes/322-preserve-word-audio-manual-update.md
|
||||||
|
|
||||||
|
Blocked gate:
|
||||||
|
- bun run changelog:lint is blocked by pre-existing malformed changes/319-interjection-annotation-filter.md, which is outside this task's files.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,58 @@
|
|||||||
|
---
|
||||||
|
id: TASK-322
|
||||||
|
title: Fix failing CI checks on PR 57
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-03 06:27'
|
||||||
|
updated_date: '2026-05-03 06:31'
|
||||||
|
labels:
|
||||||
|
- ci
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- 'https://github.com/ksyasuda/SubMiner/pull/57'
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Investigate and fix failing GitHub Actions checks on PR #57 (`tokenizer-updates`). Scope: use CI logs to identify root cause, apply focused local fix, and verify with relevant local checks.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Failing GitHub Actions check root cause is identified from logs.
|
||||||
|
- [x] #2 A focused code/test/docs fix is applied locally.
|
||||||
|
- [x] #3 Relevant local verification passes or blocked reason is documented.
|
||||||
|
- [x] #4 PR checks are rechecked or next CI action is documented.
|
||||||
|
- [x] #5 Actionable CodeRabbit PR comments are inspected and addressed or documented as non-actionable.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Fix CI changelog lint by adding a valid `type` frontmatter value to `changes/319-interjection-annotation-filter.md`.
|
||||||
|
2. Address unresolved CodeRabbit threads:
|
||||||
|
- `scripts/test-plugin-session-bindings.lua`: make `.tmp` creation portable across Unix/Windows shells.
|
||||||
|
- `src/core/services/tokenizer.ts`: pass `TokenizerAnnotationOptions` through `stripSubtitleAnnotationMetadata` paths so `sourceText` is honored.
|
||||||
|
- `src/main/runtime/mpv-main-event-main-deps.ts`: align overlay-runtime quit-on-disconnect predicate with `hasInitialPlaybackQuitOnDisconnectArg`.
|
||||||
|
- `src/renderer/handlers/mouse.test.ts`: make `elementFromPoint` stubs coordinate-sensitive.
|
||||||
|
3. Run focused checks: `bun run changelog:lint`, relevant tokenizer/main/mouse tests, and plugin Lua test path if available.
|
||||||
|
4. Recheck PR checks/comments after local verification.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
CI root cause: GitHub Actions `build-test-audit` failed during `bun run changelog:lint`; `changes/319-interjection-annotation-filter.md` must declare `type` as one of `added`, `changed`, `fixed`, `docs`, `internal`. Scope expanded by user to also address CodeRabbit comments on PR #57.
|
||||||
|
|
||||||
|
Implemented CI changelog metadata fix and unresolved CodeRabbit feedback locally. Full verification run: `bun run changelog:lint`, focused tests, `bun run typecheck`, `bun run test:fast`, `bun run test:env`, `bun run build`, `bun run test:smoke:dist`, `bun run format:check:src`. Rechecked PR checks: remote `build-test-audit` still shows the old failing run until this branch is pushed; CodeRabbit remains pending remotely until review reruns.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed PR #57 CI failure by converting `changes/319-interjection-annotation-filter.md` to valid changelog fragment metadata. Addressed unresolved CodeRabbit feedback by making plugin test `.tmp` creation portable, threading tokenizer annotation options through metadata stripping, aligning quit-on-disconnect predicates for Jellyfin playback, and strengthening mouse hit-test assertions. Also formatted two existing PR files required by the source format gate. Verification passed locally: changelog lint, focused tests, typecheck, test:fast, test:env, build, smoke dist, and format check. Remote PR checks still show the previous failed `build-test-audit` run until these local changes are pushed.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
id: TASK-323
|
||||||
|
title: Fix macOS overlay hiding while mpv remains active
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@codex'
|
||||||
|
created_date: '2026-05-03 07:41'
|
||||||
|
updated_date: '2026-05-03 07:48'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- macos
|
||||||
|
- overlay
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- src/core/services/overlay-visibility.ts
|
||||||
|
- src/window-trackers/macos-tracker.ts
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
macOS visible overlay can hide/reload during normal playback even while mpv, or the overlay over mpv, remains the active viewing surface. The fix should preserve overlay visibility and subtitle continuity during transient macOS focus/tracker flaps, while still hiding the overlay when the tracked mpv window is genuinely unavailable or another app is brought forward.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 When the macOS tracker has recent valid mpv geometry, transient focus/helper misses do not hide the visible overlay or force a reload.
|
||||||
|
- [x] #2 The overlay still hides when the tracked mpv window is genuinely lost beyond the existing tracking grace behavior.
|
||||||
|
- [x] #3 A regression test covers the macOS active-playback case where mpv/overlay focus is preserved despite a transient non-tracking state.
|
||||||
|
- [x] #4 Relevant docs or task notes are updated if behavior or verification guidance changes.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a failing regression in `src/core/services/overlay-visibility.test.ts`: on macOS, after the overlay is visible/tracked, a transient tracker state with `isTracking() === false` but non-null `getGeometry()` keeps the overlay visible, updates bounds, and does not call `hide()` or loading OSD.
|
||||||
|
2. Implement the minimal macOS preserve path in `src/core/services/overlay-visibility.ts`, mirroring the existing Windows transient non-minimized branch but without Windows z-order binding.
|
||||||
|
3. Preserve existing startup/lost-window behavior: `windowTracker: null` and `isTracking() === false` with `getGeometry() === null` still hide and show the first loading OSD.
|
||||||
|
4. Run focused tests for `src/core/services/overlay-visibility.test.ts`; then typecheck or the repo runtime verification lane if the focused patch passes.
|
||||||
|
5. Update TASK-323 notes/acceptance criteria with verification results.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Added a macOS overlay visibility regression for transient tracker loss with retained geometry. The test failed first because the old path marked tracker-not-ready and hid the overlay. Implemented a scoped preserve path in `src/core/services/overlay-visibility.ts`: macOS now keeps the visible overlay alive only when the tracker still has retained geometry; true loss with null geometry still hides and emits the existing loading OSD behavior. Added changelog fragment `changes/323-macos-overlay-tracker-flaps.md`.
|
||||||
|
|
||||||
|
Verification: `bun test src/core/services/overlay-visibility.test.ts` passed after the fix; `bun test src/window-trackers/macos-tracker.test.ts src/core/services/overlay-visibility.test.ts` passed; `bun run typecheck` passed; `bun run test:env` passed; isolated `bun test src/core/services/subsync.test.ts` passed; `bun run build` passed; `bun run test:smoke:dist` passed; `bun run changelog:lint` passed. `bun run test:fast` failed twice in an unrelated broad-suite interaction where `src/renderer/handlers/keyboard.ts` tried to use missing `window.electronAPI` while `src/core/services/subsync.test.ts` was running, followed by Bun node:test nested-test cascade errors.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed the macOS visible-overlay hide/reload path during normal playback by preserving the overlay when the tracker briefly reports non-tracking but still has retained mpv geometry. The overlay visibility service now treats that macOS state like a transient tracker flap: it keeps bounds/layer/order refreshed and leaves the overlay click-through instead of hiding or showing the loading OSD. True macOS loss remains unchanged: no tracker or null geometry still hides the overlay and uses the existing loading behavior.
|
||||||
|
|
||||||
|
Added regression coverage in `src/core/services/overlay-visibility.test.ts` for the active-playback case and added changelog fragment `changes/323-macos-overlay-tracker-flaps.md`.
|
||||||
|
|
||||||
|
Verification passed: focused overlay tests, macOS tracker + overlay tests, typecheck, `test:env`, isolated `subsync.test.ts`, build, dist smoke, and changelog lint. Full `test:fast` remains blocked by an unrelated broad-suite interaction where renderer keyboard state fires without `window.electronAPI` during `subsync.test.ts`, then Bun reports node:test cascade errors.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,63 @@
|
|||||||
|
---
|
||||||
|
id: TASK-324
|
||||||
|
title: Fix mpv playlist changes re-running app warmups
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-03 07:48'
|
||||||
|
updated_date: '2026-05-03 07:52'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- mpv
|
||||||
|
- overlay
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- launcher/
|
||||||
|
- src/core/services/mpv.ts
|
||||||
|
- src/main/runtime/
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
When moving to the next or previous mpv playlist entry, SubMiner should reconnect the existing app/runtime to mpv instead of treating the new video like a fresh app startup. Re-running startup warmups or creating another app session after the first video can interfere with overlay behavior.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Changing to next or previous mpv playlist item reuses the existing app/runtime instead of launching a new app session.
|
||||||
|
- [x] #2 Startup warmups are not repeated for playlist item changes after the first app startup.
|
||||||
|
- [x] #3 Overlay behavior remains available after playlist navigation.
|
||||||
|
- [x] #4 Regression test covers the playlist-change/reconnect path.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Reproduce the plugin auto-start regression with a failing Lua start-gate test.
|
||||||
|
2. Update mpv plugin auto-start handling so playlist/file changes with an already-running overlay reuse the existing app path and do not re-arm pause-until-ready warmup.
|
||||||
|
3. Add changelog fragment and run plugin/launcher verification.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
RED: `lua scripts/test-plugin-start-gate.lua` failed after changing the duplicate pause-until-ready auto-start expectations; it showed the loading gate was armed twice while overlay was already running.
|
||||||
|
|
||||||
|
GREEN: `plugin/subminer/process.lua` now disarms any old ready gate and only reasserts visible overlay state when auto-start fires while `state.overlay_running` is already true.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Updated the mpv Lua plugin auto-start reuse path so a file/playlist load with an already-running overlay no longer re-arms the pause-until-ready tokenization gate.
|
||||||
|
- Kept the existing app/control command reuse behavior: subsequent auto-starts reassert visible/hidden overlay state without issuing another `--start` subprocess.
|
||||||
|
- Added a changelog fragment for the mpv playlist overlay reuse fix.
|
||||||
|
|
||||||
|
Tests:
|
||||||
|
- `lua scripts/test-plugin-start-gate.lua` (red before fix, green after)
|
||||||
|
- `bun run test:plugin:src`
|
||||||
|
- `bun run changelog:lint`
|
||||||
|
- `bun run test:launcher:env:src`
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,55 @@
|
|||||||
|
---
|
||||||
|
id: TASK-325
|
||||||
|
title: Fix keyboard-only Yomitan popup shortcut precedence
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-04 01:19'
|
||||||
|
updated_date: '2026-05-04 01:22'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- keyboard
|
||||||
|
- yomitan
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
When keyboard-only mode is active and a Yomitan popup is visible, popup keyboard controls must win over overlay/mpv/session keybindings. Currently default overlay bindings such as bare `j` can fire instead of scrolling/navigating the Yomitan popup.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 With keyboard-only mode active and a Yomitan popup visible, pressing `j`/`k` forwards to the Yomitan popup instead of dispatching default session bindings such as primary subtitle track cycling.
|
||||||
|
- [x] #2 With keyboard-only mode inactive, existing popup-visible session binding behavior remains unchanged for bound keys.
|
||||||
|
- [x] #3 Regression coverage captures the keyboard-only popup precedence behavior.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a focused regression in `src/renderer/handlers/keyboard.test.ts`: keyboard-only mode + visible Yomitan popup + bare `KeyJ` session binding should forward `KeyJ` to the popup and not dispatch the mpv/session binding.
|
||||||
|
2. Verify the new test fails before production changes.
|
||||||
|
3. Patch `src/renderer/handlers/keyboard.ts` so popup key handling ignores session-binding precedence only while keyboard-driven mode is enabled.
|
||||||
|
4. Run targeted renderer keyboard tests, then update acceptance criteria and final notes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implementation: `handleYomitanPopupKeybind` now only lets configured session bindings take precedence when keyboard-driven mode is not enabled. In keyboard-only mode with a visible Yomitan popup, bare popup keys such as `KeyJ` forward to the popup instead of dispatching overlay/mpv keybindings. Added a regression covering `KeyJ` bound to `cycle sid`.
|
||||||
|
|
||||||
|
Verification: targeted test failed before the production change, then passed after the fix. Full local gates run: `bun test src/renderer/handlers/keyboard.test.ts --test-name-pattern "keyboard mode: popup keybinds take precedence"`, `bun test src/renderer/handlers/keyboard.test.ts`, `bun run changelog:lint`, `bun run typecheck`, `bun run test:fast`, `bun run test:env`, `bun run build`, `bun run test:smoke:dist`. Build initially required `bun install --frozen-lockfile`, submodule init, and `stats/` locked deps install because this worktree had no dependencies/submodules checked out.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed keyboard-only Yomitan popup shortcut precedence by allowing popup key forwarding to bypass configured session bindings only while keyboard-driven mode is active. This makes popup controls such as `j`/`k` win over default overlay/mpv bindings like primary subtitle track cycling, while preserving existing non-keyboard-only popup behavior where configured bindings still fire.
|
||||||
|
|
||||||
|
Added renderer keyboard regression coverage for the reported `KeyJ`/`cycle sid` conflict and added a changelog fragment for the user-visible overlay fix.
|
||||||
|
|
||||||
|
Verification passed: targeted red/green regression, full renderer keyboard test file, changelog lint, typecheck, `test:fast`, `test:env`, build, and dist smoke tests.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
id: TASK-325
|
||||||
|
title: Fix session chart known-word percentage denominator
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 01:19'
|
||||||
|
updated_date: '2026-05-04 01:23'
|
||||||
|
labels:
|
||||||
|
- stats
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Session detail known-word percentages should use the same filtered vocabulary occurrence rows for both known and total word counts. Current chart can divide known persisted word occurrences by raw token totals, causing excluded tokens to depress the known percentage.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Session known-word timeline API exposes cumulative filtered total word counts alongside known counts.
|
||||||
|
- [x] #2 Session detail chart computes known/unknown areas from filtered totals, not raw timeline token counts, when known-word data is available.
|
||||||
|
- [x] #3 Session summary known-word rate uses filtered persisted word totals where available and preserves safe fallback behavior when known-word data is unavailable.
|
||||||
|
- [x] #4 Regression tests cover filtered denominator behavior for the API and chart data path.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented in-place fix using existing persisted word occurrence rows. `/api/stats/sessions/:id/known-words-timeline` now returns cumulative `totalWordsSeen` from filtered persisted occurrences, and session known-word rates divide by the same filtered total. Session detail chart builds known/unknown areas from `totalWordsSeen` instead of raw timeline `tokensSeen`.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Known-word percentages on session charts now use filtered persisted word totals for both numerator and denominator. No migration/backfill required; data comes from existing `imm_word_line_occurrences`. Added regression coverage for the API response/rate and chart data builder.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+56
@@ -0,0 +1,56 @@
|
|||||||
|
---
|
||||||
|
id: TASK-325
|
||||||
|
title: Keep JLPT underline color fixed with combined lookup annotations
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@Codex'
|
||||||
|
created_date: '2026-05-04 00:25'
|
||||||
|
updated_date: '2026-05-04 00:28'
|
||||||
|
labels:
|
||||||
|
- overlay
|
||||||
|
- jlpt
|
||||||
|
- renderer
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- TASK-318
|
||||||
|
- TASK-308
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Yomitan lookup on a subtitle token that has a JLPT level plus another annotation such as frequency or known-word highlighting can make the JLPT underline take the other annotation color. The underline must always remain the token's JLPT level color; other annotation classes may still control text color.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 A JLPT token combined with frequency styling keeps its underline set to the configured JLPT level color during lookup/selection styling.
|
||||||
|
- [x] #2 A JLPT token combined with known-word styling keeps its underline set to the configured JLPT level color during lookup/selection styling.
|
||||||
|
- [x] #3 Regression coverage exercises combined JLPT plus non-JLPT annotation selectors, including character span selection/hover styling used by lookup.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add focused renderer CSS regression coverage for combined `word-jlpt-n*` plus known/frequency classes, including `.c::selection`/`.c:hover` lookup paths.
|
||||||
|
2. Run `bun test src/renderer/subtitle-render.test.ts` and confirm the new assertion fails on the current CSS.
|
||||||
|
3. Update `src/renderer/style.css` so JLPT decoration color is locked on the token and child character spans without changing text color priority for known/frequency/name/N+1 annotations.
|
||||||
|
4. Re-run the focused renderer test, then run typecheck/changelog checks as scope requires.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Added red/green renderer CSS regression for combined JLPT plus known/N+1/frequency annotation classes and character hover lookup paths. Current CSS failed before the lock selectors were added; focused test passes after the CSS change.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed JLPT underline color drift for tokens that also carry known-word, N+1, or frequency annotation classes. The renderer CSS now explicitly locks the underline decoration color for combined JLPT annotation selectors, hover, character hover, and selection states while preserving the existing text color priority for other annotations.
|
||||||
|
|
||||||
|
Added renderer regression coverage for combined JLPT plus non-JLPT annotation selectors and lookup character hover paths. Added a user-visible changelog fragment.
|
||||||
|
|
||||||
|
Checks: `bun test src/renderer/subtitle-render.test.ts`; `bun run changelog:lint`; `bun run typecheck`; `bun run format:check:src`.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+32
@@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
id: TASK-326
|
||||||
|
title: Fix AniList post-watch update after skipped completion threshold
|
||||||
|
status: In Progress
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 00:33'
|
||||||
|
labels:
|
||||||
|
- anilist
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
AniList episode progress should sync reliably when playback reaches or passes the watched trigger point, even if mpv progress events jump over the exact threshold. Investigate why a completed watched episode did not update AniList and fix the root cause for post-watch tracking.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 When playback moves from before the completion threshold to any later position at or beyond the threshold, AniList queues or sends the episode progress update once.
|
||||||
|
- [x] #2 If playback is already past the completion threshold and the update has not yet been recorded for the current media/episode, AniList still queues or sends the update.
|
||||||
|
- [x] #3 AniList progress updates remain deduplicated for the same media/episode watch completion.
|
||||||
|
- [x] #4 A regression test covers the skipped-threshold or already-past-threshold case.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Fixed mpv `time-pos` ordering so post-watch checks read the fresh playback position after seeks.
|
||||||
|
- Wired manual mark-watched to run a forced AniList post-watch sync after the local watched mark succeeds.
|
||||||
|
- Added regressions for time-position ordering, manual watched sync, forced post-watch updates, and the Little Witch Academia filename parse.
|
||||||
@@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
id: TASK-326
|
||||||
|
title: Make stats word metrics honor filtering rules
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 01:35'
|
||||||
|
updated_date: '2026-05-04 02:08'
|
||||||
|
labels:
|
||||||
|
- stats
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Audit stats app metrics that show or derive from word totals and make them use filtered persisted vocabulary occurrences where the UI concept is learned/seen words. Preserve raw telemetry only where it is intentionally playback/token telemetry.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Stats UI word totals, word rates, lookup-per-word rates, and chart word series use filtered persisted word occurrences where available.
|
||||||
|
- [x] #2 Known-word metrics continue to use the same filtered denominator as known counts.
|
||||||
|
- [x] #3 Trend, overview, library, session, and episode surfaces are audited with regression coverage for changed data paths.
|
||||||
|
- [x] #4 Fallback behavior remains safe for sessions without persisted vocabulary occurrences.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Audit finding: raw `tokensSeen` / `totalTokensSeen` still feeds overview hints, dashboard aggregation, trends activity/progress/anime cumulative/library summary, lookup-per-100-word rates, session rows/recent sessions/episode sessions, and library/anime/media headers. Vocabulary and known unique word summaries already use persisted filtered vocabulary rows. Recommended design: query-time filtered word totals from existing `imm_word_line_occurrences`, with raw-token fallback only when a session has no persisted occurrence rows.
|
||||||
|
|
||||||
|
Implemented shared query-time filtered word counts. Session summaries, overview hints, daily/monthly rollups, anime/media library/detail rows, anime episode rows, episode/media sessions, trends activity/progress/anime cumulative, library summary, and lookup-per-100-word ratios now use filtered persisted word occurrences. Fallback remains raw token totals only for sessions with no persisted subtitle-line rows.
|
||||||
|
|
||||||
|
Follow-up implemented: Vocab frequency tables now apply the same tokenizer vocabulary predicate at read time, because old `imm_words` rows can predate current tokenizer exclusion rules. Vocabulary persistence and cleanup also mirror the broader subtitle-annotation grammar filters. Added common frequency stop terms observed in the stats vocabulary list to the shared tokenizer exclusion set so those rows are filtered consistently across subtitle annotations, persistence, cleanup, stats reads, and SQL word-count aggregates.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Stats word metrics now honor filtering rules through the read-model query layer. Existing persisted `imm_word_line_occurrences` provide the filtered denominator; no migration/backfill needed. Vocab tables filter stored rows on read using tokenizer vocabulary rules, so legacy noisy rows stop appearing without a migration. Added regressions for session/overview/rollup fallback behavior, trends/library lookup-rate behavior, vocabulary read filtering, cleanup filtering, and shared stop-term filtering.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
id: TASK-327
|
||||||
|
title: Persist stats page exclusion list in database
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 01:39'
|
||||||
|
updated_date: '2026-05-04 01:49'
|
||||||
|
labels:
|
||||||
|
- feature
|
||||||
|
- stats
|
||||||
|
- database
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Add database-backed persistence for the stats page exclusion list. On first load with the new schema, seed the new table from the existing exclusion list source so existing user choices are preserved. After migration, update database rows whenever the exclusion list is changed or saved so it persists across browser sessions indefinitely.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 A new small database table stores stats page exclusion entries.
|
||||||
|
- [x] #2 First load with the new schema seeds the table from the existing exclusion list source.
|
||||||
|
- [x] #3 Subsequent exclusion list save/change operations update the database-backed list.
|
||||||
|
- [x] #4 Regression coverage verifies migration/seed behavior and persistence updates.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented DB-backed stats exclusion list using schema version 18 and new `imm_stats_excluded_words` table. Added read/replace query helpers, service methods, and `/api/stats/excluded-words` GET/PUT routes. Stats frontend now loads exclusions from DB, seeds the empty DB table from legacy `localStorage` on first load, and writes each toggle/restore/clear through the API while keeping localStorage in sync for compatibility. Added focused regression coverage for schema/read-replace, API routes, API client, and frontend bootstrap/update behavior. Verification: `bun run typecheck` passed; `bun test src/core/services/__tests__/stats-server.test.ts stats/src/lib/api-client.test.ts stats/src/hooks/useExcludedWords.test.ts` passed; `bun test src/core/services/immersion-tracker/storage-session.test.ts` passed; `bun run docs:test` passed; `bun run format:check:stats` passed; `bun run changelog:lint` passed. Blocked/unrelated: `bun run typecheck:stats` fails in existing stats files (`AnilistSelector.tsx`, `reading-utils*`, `session-grouping.test.ts`, `yomitan-lookup.test.tsx`); `bun run test:immersion:sqlite:src` fails existing `recordSubtitleLine counts exact Yomitan tokens for session metrics` expected 4 got 3; `bun run docs:build` fails missing `@catppuccin/vitepress/theme/macchiato/mauve.css` import.
|
||||||
|
|
||||||
|
Added `src/core/services/__tests__/stats-server.test.ts` and `stats/src/hooks/useExcludedWords.test.ts` to the `test:core:src` allowlist so the new DB exclusion route/client/store regressions run in the maintained fast source lane.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Persisted the stats vocabulary exclusion list in SQLite with new schema version 18 table `imm_stats_excluded_words`. Added backend read/replace helpers and `/api/stats/excluded-words` GET/PUT routes, then wired the stats frontend exclusion store to load DB rows, seed an empty DB from legacy browser localStorage on first load, and update the DB on toggle/restore/clear. Updated docs and added changelog fragment. Focused tests and root typecheck pass; broader stats/docs/sqlite gates are blocked by unrelated existing failures recorded in notes.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,67 @@
|
|||||||
|
---
|
||||||
|
id: TASK-327
|
||||||
|
title: Restore stats daemon deferral when launching playback
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@Codex'
|
||||||
|
created_date: '2026-05-04 01:15'
|
||||||
|
updated_date: '2026-05-04 01:17'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- stats
|
||||||
|
- runtime
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Launching a video while a background stats daemon is already running must not fail with a `stats.serverPort` already-in-use error. Normal in-app stats startup should reuse the live daemon URL instead of binding a second stats server, while preserving managed playback shutdown behavior from TASK-316.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 A live background stats daemon from another process causes in-app stats URL resolution to return the daemon URL without starting a local stats server.
|
||||||
|
- [x] #2 Dead or stale daemon state is removed and local stats startup still works.
|
||||||
|
- [x] #3 Managed playback shutdown behavior remains covered by existing tests.
|
||||||
|
- [x] #4 Focused regression tests pass.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Update `src/main/runtime/stats-server-routing.test.ts` first so a live foreign daemon must return its daemon URL and skip local server startup.
|
||||||
|
2. Run the focused routing test to confirm the regression fails red.
|
||||||
|
3. Update `src/main/runtime/stats-server-routing.ts` to return `{ source: 'background' }` for live foreign daemon state, clear stale/self-owned state, and keep local startup fallback unchanged.
|
||||||
|
4. Run focused stats routing tests plus managed playback tests touched by TASK-316.
|
||||||
|
5. Update changelog and task acceptance/final notes.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented via TDD: first changed `stats-server-routing.test.ts` to require live foreign daemon deferral and observed the expected red failure. Then restored `stats-server-routing.ts` to return the daemon URL with `source: 'background'` when daemon state belongs to a live other process. Stale/dead and self-owned stale cleanup paths remain local fallback.
|
||||||
|
|
||||||
|
Verification passed: `bun test src/main/runtime/stats-server-routing.test.ts`; focused runtime suite for stats daemon + TASK-316 managed playback files; `bun run typecheck`; `bun run test:fast`.
|
||||||
|
|
||||||
|
`bun run changelog:lint` is blocked by pre-existing unrelated `changes/326-anilist-time-position-post-watch.md` missing valid `type` metadata; `changes/327-stats-daemon-deferral.md` follows the expected fragment format.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Restored in-app stats startup deferral to a live background stats daemon from another process, returning the daemon URL and skipping local stats server binding.
|
||||||
|
- Kept stale/dead daemon cleanup and local stats startup fallback behavior intact.
|
||||||
|
- Added a changelog fragment for the restored port-conflict fix.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/main/runtime/stats-server-routing.test.ts`
|
||||||
|
- `bun test src/main/runtime/stats-server-routing.test.ts src/core/services/mpv.test.ts src/core/services/mpv-protocol.test.ts src/main/runtime/mpv-client-event-bindings.test.ts src/main/runtime/mpv-main-event-bindings.test.ts src/main/runtime/mpv-main-event-main-deps.test.ts src/main/runtime/stats-cli-command.test.ts src/stats-daemon-control.test.ts`
|
||||||
|
- `bun run typecheck`
|
||||||
|
- `bun run test:fast`
|
||||||
|
|
||||||
|
Blocked check:
|
||||||
|
- `bun run changelog:lint` fails on unrelated pre-existing `changes/326-anilist-time-position-post-watch.md` metadata, not this change.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+64
@@ -0,0 +1,64 @@
|
|||||||
|
---
|
||||||
|
id: TASK-328
|
||||||
|
title: Keep subtitle prefetch running after immediate cached annotation render
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-04 01:26'
|
||||||
|
updated_date: '2026-05-04 01:30'
|
||||||
|
labels: []
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- >-
|
||||||
|
/home/sudacode/projects/japanese/SubMiner/src/main/runtime/mpv-main-event-actions.ts
|
||||||
|
- /home/sudacode/projects/japanese/SubMiner/src/main.ts
|
||||||
|
- >-
|
||||||
|
/home/sudacode/projects/japanese/SubMiner/src/core/services/subtitle-processing-controller.ts
|
||||||
|
- >-
|
||||||
|
/home/sudacode/projects/japanese/SubMiner/backlog/completed/task-197 -
|
||||||
|
Eliminate-per-line-plain-subtitle-flash-on-prefetch-cache-hit.md
|
||||||
|
- >-
|
||||||
|
/home/sudacode/projects/japanese/SubMiner/backlog/completed/task-196 -
|
||||||
|
Fix-subtitle-prefetch-cache-key-mismatch-and-active-cue-window.md
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Cached subtitle annotation hits should render annotated subtitles immediately without starving the subtitle prefetcher. Current evidence: the mpv subtitle-change path emits the cached payload before forwarding the subtitle change; in the runtime, the cached emit resumes prefetch, then the forwarded change pauses it, and no async controller emit follows on a cache hit to resume it again.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Cached subtitle annotation payloads still render immediately without a plain subtitle flash.
|
||||||
|
- [x] #2 A cache-hit subtitle-change event leaves subtitle prefetch eligible to continue after the immediate annotated emit.
|
||||||
|
- [x] #3 Cache-miss subtitle-change behavior still shows plain text immediately while async annotation processing runs.
|
||||||
|
- [x] #4 Regression coverage proves the cache-hit ordering that prevents prefetch from staying paused.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a focused regression test in `src/main/runtime/mpv-main-event-actions.test.ts` proving cache-hit subtitle changes pause live prefetch work before emitting the immediate annotated payload, so the emit resumes prefetch last.
|
||||||
|
2. Change `createHandleMpvSubtitleChangeHandler` ordering in `src/main/runtime/mpv-main-event-actions.ts`: set current text, consume cache, forward `onSubtitleChange(text)`, then emit cached payload or plain fallback, then refresh Discord presence.
|
||||||
|
3. Preserve existing behavior: cache hits emit annotated payload synchronously; cache misses emit `{ text, tokens: null }` synchronously.
|
||||||
|
4. Run focused tests for `mpv-main-event-actions`; run adjacent controller/prefetch tests if ordering touches cache assumptions.
|
||||||
|
5. Update TASK-328 acceptance criteria and add a changelog fragment if the repo requires one for this user-visible fix.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Red/green: added cache-hit ordering regression in `src/main/runtime/mpv-main-event-actions.test.ts`; first run failed with actual order `emit:annotated` before `process:line`. Fix narrows ordering change to cache hits only: cache hit calls `onSubtitleChange` before immediate annotated emit; cache miss keeps plain broadcast before processing.
|
||||||
|
|
||||||
|
Verification: `bun test src/main/runtime/mpv-main-event-actions.test.ts` passed; `bun test src/core/services/subtitle-processing-controller.test.ts` passed; `bun test src/core/services/subtitle-prefetch.test.ts` passed; combined targeted test command passed 35 tests; `bun run typecheck` passed. `bun run changelog:lint` blocked by unrelated pre-existing `changes/326-anilist-time-position-post-watch.md` missing a valid `type` metadata line.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed the subtitle cache-hit ordering that could leave subtitle prefetch paused after an immediate annotated render. Cache hits now forward the subtitle change first, then emit the cached annotated payload, so the runtime pause happens before the emit path resumes prefetch. Cache misses keep the previous plain-subtitle-first path so fallback text still appears immediately while tokenization runs.
|
||||||
|
|
||||||
|
Added a regression test for the cache-hit ordering and a changelog fragment for the overlay fix. Verified with targeted subtitle runtime/controller/prefetch tests and `bun run typecheck`; changelog lint is blocked by an unrelated existing malformed fragment for TASK-326.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,43 @@
|
|||||||
|
---
|
||||||
|
id: TASK-329
|
||||||
|
title: Keep JLPT subtitle styling underline-only
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 02:13'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- renderer
|
||||||
|
- jlpt
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- src/renderer/style.css
|
||||||
|
- src/renderer/subtitle-render.test.ts
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Fix subtitle token styling so JLPT metadata never changes token text color. JLPT should only render the level marker/underline affordance while known, n+1, name-match, and frequency colors retain priority.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 JLPT-only subtitle tokens do not set token text color.
|
||||||
|
- [x] #2 JLPT level marker/underline still uses configured JLPT color.
|
||||||
|
- [x] #3 Existing known, n+1, name-match, and frequency text colors remain unchanged.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Changed subtitle JLPT styling from text color to underline decoration and updated renderer CSS regression coverage.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/renderer/subtitle-render.test.ts`
|
||||||
|
- `bunx prettier --check src/renderer/subtitle-render.test.ts src/renderer/style.css`
|
||||||
|
- `bun run typecheck`
|
||||||
|
|
||||||
|
Blocked:
|
||||||
|
- `bun run test:fast` fails on a pre-existing, unrelated stats/session test: `recordSubtitleLine counts exact Yomitan tokens for session metrics` expects `tokensSeen` to be 4 but gets 3.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,70 @@
|
|||||||
|
---
|
||||||
|
id: TASK-330
|
||||||
|
title: Fix PR 60 CI failures and CodeRabbit feedback
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-04 02:50'
|
||||||
|
updated_date: '2026-05-04 02:59'
|
||||||
|
labels:
|
||||||
|
- ci
|
||||||
|
- pr-review
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- 'https://github.com/ksyasuda/SubMiner/pull/60'
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Resolve failing GitHub Actions checks and actionable unresolved CodeRabbit review feedback on PR #60 (Persist stats exclusions in DB and fix word metrics filtering). Keep fixes scoped to the PR behavior and preserve existing project patterns.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Failing GitHub Actions checks for PR #60 have an identified root cause and local fix.
|
||||||
|
- [x] #2 All actionable unresolved CodeRabbit review comments on PR #60 are addressed locally or explicitly documented as non-actionable.
|
||||||
|
- [x] #3 Relevant local verification passes for the changed code paths.
|
||||||
|
- [x] #4 Task notes summarize CI failure context, review-comment handling, and any residual verification gaps.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Resolve PR #60 context and inspect GitHub Actions failures with the gh-fix-ci workflow.
|
||||||
|
2. Fetch unresolved review threads with the gh-address-comments workflow, focusing on CodeRabbit actionable comments.
|
||||||
|
3. Read the touched files/tests around the failing paths and comments; identify root cause before edits.
|
||||||
|
4. Apply minimal fixes with regression coverage where appropriate.
|
||||||
|
5. Run targeted verification first, then broader repo gates as time permits.
|
||||||
|
6. Update Backlog notes/acceptance criteria with CI/comment outcomes and residual risks.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Resolved PR #60 CI failure by restoring raw `tokensSeen` for session summaries while keeping filtered persisted word counts in aggregate/known-word paths. Addressed CodeRabbit feedback: fixed missing `headword` test fixture binding; paged vocabulary stats past filtered rows; preserved lifetime/rollup totals when retained-session recomputation is partial; emitted flat known-word timeline points for zero-visible-word line gaps; restored localStorage mocks; added rollback/retry behavior for excluded-word store persistence/initialization.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed the PR #60 CI failure and addressed actionable CodeRabbit feedback.
|
||||||
|
|
||||||
|
Key changes:
|
||||||
|
- Restored exact Yomitan token counts for session summary metrics while leaving filtered word counts for aggregate and known-word calculations.
|
||||||
|
- Fixed malformed query test fixtures by binding `headword` into `imm_words` inserts.
|
||||||
|
- Updated vocabulary stats to page until enough visible rows are collected after post-query filtering.
|
||||||
|
- Made library/detail/rollup read models preserve lifetime or stored rollup totals when retained-session recomputation is partial, including dashboard rollup-derived word metrics.
|
||||||
|
- Kept known-word timeline line positions stable by emitting flat points for missing line indexes.
|
||||||
|
- Made excluded-word persistence rollback on failed writes, allow initialization retries after transient load failures, and restored mocked `localStorage` in tests.
|
||||||
|
|
||||||
|
Verification passed:
|
||||||
|
- `bun run typecheck`
|
||||||
|
- `bun run test:fast`
|
||||||
|
- `bun run test:env`
|
||||||
|
- `bun run build`
|
||||||
|
- `bun run test:smoke:dist`
|
||||||
|
- `bun run format:check:src`
|
||||||
|
- `git diff --check`
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
id: TASK-331
|
||||||
|
title: Address unresolved CodeRabbit comments on PR 57
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-04 03:21'
|
||||||
|
updated_date: '2026-05-04 03:27'
|
||||||
|
labels:
|
||||||
|
- pr-feedback
|
||||||
|
- coderabbit
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- 'https://github.com/ksyasuda/SubMiner/pull/57'
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Assess and fix unresolved CodeRabbit review comments on PR #57 after rebasing tokenizer-updates. Scope includes manual clipboard SentenceAudio guard, tokenizer standalone particle blacklist, AniList guessit fallback confidence, startup gate duplicate auto-start, and small regression-test hardening where applicable.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Each unresolved CodeRabbit comment is either fixed or explicitly assessed as not applicable against current code.
|
||||||
|
- [x] #2 Regression tests cover behavior changes where practical.
|
||||||
|
- [x] #3 Relevant focused tests and typecheck pass.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Fixed all verified actionable CodeRabbit comments from PR #57: manual clipboard updates no longer fall back to ExpressionAudio when SentenceAudio is absent, connective particle phrases no longer suppress lexical verb readings like 立って, guessit output only borrows parser season/episode from non-low-confidence parses, duplicate auto-start no longer releases an active pause-until-ready gate, JLPT CSS tests block text-decoration shorthand underlines, post-watch update rejection logging is covered, and duplicate quit-on-disconnect predicate code is shared.
|
||||||
|
|
||||||
|
Verification: bun test src/anki-integration/card-creation-manual-update.test.ts src/core/services/tokenizer/annotation-stage.test.ts src/core/services/anilist/anilist-updater.test.ts src/main/runtime/mpv-main-event-actions.test.ts src/renderer/subtitle-render.test.ts; lua scripts/test-plugin-start-gate.lua; bun run typecheck; bun run test:fast.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
+60
@@ -0,0 +1,60 @@
|
|||||||
|
---
|
||||||
|
id: TASK-332
|
||||||
|
title: Fix subtitle frequency annotation missing ranks shown in Yomitan popup
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-05-04 03:29'
|
||||||
|
updated_date: '2026-05-04 03:41'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- tokenizer
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Subtitle frequency highlighting can miss a token even when the Yomitan popup shows a rank within the configured threshold. Reproduced with `第二走者とアンカーは\n中継地点に速やかに移動!`: Yomitan popup shows `第二` JPDB rank 1820, but SubMiner tokenizer output has no `frequencyRank` for `第二`, so renderer cannot annotate it.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 `第二` in `第二走者とアンカーは\n中継地点に速やかに移動!` receives the Yomitan rank shown by the popup when frequency highlighting is enabled.
|
||||||
|
- [x] #2 Regression test covers the Yomitan scan/frequency ingestion path for exact popup-derived ranks.
|
||||||
|
- [x] #3 Existing tokenizer frequency tests continue to pass.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Reproduce and inspect the missing `第二` rank path with tokenizer probes and focused tests.
|
||||||
|
2. Preserve exact Yomitan scan frequency ranks when the matching frequency entry omits reading metadata but has the same exact term.
|
||||||
|
3. Allow ranked ordinal prefix-noun tokens (`第` + numeric noun, e.g. `第二`) through annotation POS filtering while keeping standalone prefixes excluded.
|
||||||
|
4. Verify with focused tokenizer/runtime/annotation tests, typecheck, changelog lint, and a live-style Yomitan profile probe.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Root-cause probe against temp copy of Yomitan profile: tokenizer returns no frequencyRank for `第二`; renderer config `topX` is 10000, so render threshold is not the blocker.
|
||||||
|
|
||||||
|
User approved implementation plan on 2026-05-04.
|
||||||
|
|
||||||
|
Verification: `bun test src/core/services/tokenizer.test.ts src/core/services/tokenizer/yomitan-parser-runtime.test.ts src/core/services/tokenizer/annotation-stage.test.ts` passed (192 tests).
|
||||||
|
|
||||||
|
Verification: `bun run typecheck` passed.
|
||||||
|
|
||||||
|
Verification: `bun run changelog:lint` passed.
|
||||||
|
|
||||||
|
Verification: `bun run get-frequency:electron -- --yomitan-user-data /tmp/subminer-yomitan-probe-909423 "第二走者とアンカーは\\n中継地点に速やかに移動!"` produced `第二` with `frequencyRank: 1820`.
|
||||||
|
|
||||||
|
Finalization check: implementation plan updated to reflect the discovered POS-filter root cause and completed solution.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed subtitle frequency annotation for `第二` by allowing ranked ordinal prefix-noun compounds through annotation POS filtering. Also made scan rank matching tolerate exact frequency entries where one side omits reading metadata. Verified with tokenizer/runtime/annotation tests, typecheck, changelog lint, and a live-style Yomitan profile probe showing `第二` now receives frequencyRank 1820.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
---
|
||||||
|
id: TASK-333
|
||||||
|
title: Suppress aru subtitle annotations
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 04:39'
|
||||||
|
updated_date: '2026-05-04 05:02'
|
||||||
|
labels:
|
||||||
|
- tokenizer
|
||||||
|
- annotations
|
||||||
|
- bug
|
||||||
|
dependencies: []
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Add `ある` / `有る` to the subtitle annotation suppression path so `aru` tokens remain hoverable and never receive N+1, JLPT, frequency, or name-match annotation metadata. Known-word highlighting is special: if a filtered `aru` token is known and known highlighting is enabled, it should still render as known.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 `ある` and kanji headword/surface variants such as `有る` are excluded by the subtitle annotation filter.
|
||||||
|
- [x] #2 Annotation stripping clears N+1, JLPT, frequency, and name metadata for `aru` tokens while preserving token hover data.
|
||||||
|
- [x] #3 Known-word highlighting still applies to filtered tokens, including `aru`, when known-word lookup marks them known.
|
||||||
|
- [x] #4 Regression coverage fails before the fix and passes after.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add `ある`/`有る`/`在る` to the shared subtitle annotation hard-exclusion terms.
|
||||||
|
2. Preserve/recompute known-word status for filtered tokens while stripping N+1, JLPT, frequency, and name metadata.
|
||||||
|
3. Add RED/GREEN unit and tokenizer regression coverage, plus a changelog fragment.
|
||||||
|
4. Run targeted tests and full handoff gate.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
TDD path: added failing annotation-stage coverage first. Initial implementation made targeted tests pass, then broader tokenizer coverage revealed an older fixture expecting `ある` to remain lexical; updated that integration expectation to the new requested behavior. Follow-up correction: known-word highlighting is the lone annotation exception for filtered tokens, so the strip path now preserves known state and `annotateTokens` recomputes known status for filtered tokens while still clearing N+1/JLPT/frequency/name metadata.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Suppressed non-known subtitle annotations for `aru` existence verbs by adding `ある`, `有る`, and `在る` to the shared hard-exclusion list. Corrected the filtered-token path so known-word highlighting still applies whenever known highlighting is enabled; filtered tokens now keep/gain `isKnown` but still lose N+1, JLPT, frequency, and name metadata.
|
||||||
|
|
||||||
|
Added and updated annotation-stage and tokenizer regression coverage for `aru`, particles, helper fragments, interjections, and other filtered known tokens. Added `changes/333-aru-annotation-filter.md`.
|
||||||
|
|
||||||
|
Validation passed: RED failures observed before implementation/correction; `bun test src/core/services/tokenizer/annotation-stage.test.ts`; `bun test src/core/services/tokenizer.test.ts`; `bun run typecheck`; `bun run format:check:src`; `bun run changelog:lint`; `bun run test:fast`; `bun run test:env`; `bun run build`; `bun run test:smoke:dist`.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
---
|
||||||
|
id: TASK-334
|
||||||
|
title: Assess and address PR 57 latest CodeRabbit comments
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- '@codex'
|
||||||
|
created_date: '2026-05-04 05:03'
|
||||||
|
updated_date: '2026-05-04 05:07'
|
||||||
|
labels:
|
||||||
|
- pr-feedback
|
||||||
|
- coderabbit
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- 'https://github.com/ksyasuda/SubMiner/pull/57'
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Assess the latest CodeRabbit review on PR #57 submitted 2026-05-04 and fix verified issues. Current scope: AniList post-watch duplicate-write race, known-word cache mutation return value, and manual-mark AniList rejection isolation with regression coverage.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Each latest CodeRabbit comment is either fixed or explicitly assessed as not applicable against current code.
|
||||||
|
- [x] #2 Regression tests cover behavior changes where practical.
|
||||||
|
- [x] #3 Relevant focused tests and typecheck pass, or any blocked verification is documented.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Verify each latest CodeRabbit finding against current code.
|
||||||
|
2. Update known-word cache append return semantics so cache clears are reported as mutations when state existed.
|
||||||
|
3. Acquire AniList post-watch in-flight before async gating and release in finally.
|
||||||
|
4. Isolate manual-mark AniList callback failures in IPC and add a rejection-path regression test.
|
||||||
|
5. Run focused tests for touched areas plus typecheck; document any blocked verification.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Verified latest CodeRabbit review submitted 2026-05-04 on PR #57. Fixed all three current items: known-word cache mutation return after cache reset, AniList post-watch concurrent in-flight race, and manual watched mark isolation from AniList callback failures. Added regression tests for each path and a changelog fragment.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed latest PR #57 CodeRabbit feedback by reporting known-word cache clears as mutations during immediate append, acquiring AniList post-watch in-flight before awaited gates to prevent duplicate writes, and isolating manual watched mark success from AniList post-watch callback failures. Added focused regression coverage in known-word cache, AniList post-watch, and IPC tests, plus a changelog fragment.
|
||||||
|
|
||||||
|
Verification: bun test src/anki-integration/known-word-cache.test.ts; bun test src/main/runtime/anilist-post-watch.test.ts; bun test src/core/services/ipc.test.ts; bun run typecheck; bun run format:check:src; bun run changelog:lint; bun run test:fast; bun run test:env; bun run build; bun run test:smoke:dist.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,39 @@
|
|||||||
|
---
|
||||||
|
id: TASK-335
|
||||||
|
title: Fix Linux AniList setup gate using stored keyring token
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 05:26'
|
||||||
|
updated_date: '2026-05-04 05:30'
|
||||||
|
labels:
|
||||||
|
- anilist
|
||||||
|
- bug
|
||||||
|
- linux
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
AniList setup page reopens on Linux video launch even when the token exists in secret storage and post-watch updates can use it. Investigate setup gating versus update token refresh paths and make them agree on stored-token availability.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Launching a video on Linux with an AniList token available in secret storage does not show the AniList setup page just because config accessToken is empty.
|
||||||
|
- [x] #2 If secret storage load fails, setup/errors surface the underlying storage problem instead of behaving like an empty token.
|
||||||
|
- [x] #3 Regression coverage exercises the setup-gate token availability path and preserves post-watch update token behavior.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Patched AniList setup callback to require successful token persistence before caching/closing the setup flow. Patched config reload auth refresh to pass allowSetupPrompt:false so normal startup/playback reloads do not open AniList setup UI. Added regression coverage around persistence failure and non-prompting config refresh.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed AniList setup/login flow so failed encrypted token persistence no longer reports success or seeds only an in-memory token. Config reload now refreshes AniList auth state without opening the setup window during playback, reducing repeated Linux setup prompts when safeStorage/keyring resolution fails.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+39
@@ -0,0 +1,39 @@
|
|||||||
|
---
|
||||||
|
id: TASK-337
|
||||||
|
title: Fix transient Linux safeStorage failure poisoning AniList token store
|
||||||
|
status: Done
|
||||||
|
assignee: []
|
||||||
|
created_date: '2026-05-04 05:51'
|
||||||
|
updated_date: '2026-05-04 05:52'
|
||||||
|
labels:
|
||||||
|
- anilist
|
||||||
|
- bug
|
||||||
|
- linux
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
The AniList token store memoizes a false safeStorage availability result. On Linux this can happen before Electron/keyring readiness, causing later post-watch updates and setup saves to report a missing login or that encryption is unavailable, even after the keyring becomes available.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 A transient safeStorage unavailable result does not prevent a later stored AniList token load once encryption is available.
|
||||||
|
- [x] #2 A transient safeStorage unavailable result does not prevent a later AniList token save once encryption is available.
|
||||||
|
- [x] #3 Regression coverage protects the retry behavior.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Changed AniList token store safeStorage probe to memoize successful probes only. Failed probes now return false without poisoning later load/save attempts, covering Linux startup windows where Electron safeStorage/keyring can be unavailable before app readiness but usable later. Added regression test for transient unavailable -> available load/save retry.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Fixed a Linux AniList auth failure where an early safeStorage/keyring miss was cached for the whole process. Stored tokens now load and setup tokens can save after GNOME libsecret becomes available later in startup.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,72 @@
|
|||||||
|
---
|
||||||
|
id: TASK-338
|
||||||
|
title: Fix known-word highlight on standalone subtitle particles
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- codex
|
||||||
|
created_date: '2026-05-04 05:52'
|
||||||
|
updated_date: '2026-05-04 05:57'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- subtitle
|
||||||
|
- tokenizer
|
||||||
|
dependencies: []
|
||||||
|
references:
|
||||||
|
- src/core/services/tokenizer/annotation-stage.ts
|
||||||
|
- src/core/services/tokenizer/subtitle-annotation-filter.ts
|
||||||
|
- src/renderer/subtitle-render.ts
|
||||||
|
priority: medium
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Standalone grammar particles such as に should not render as known-word green when they appear in the known-word cache as readings for other words. Keep known-word coloring for lexical tokens, but prevent grammar-excluded subtitle tokens from getting known-green.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Standalone grammar particles like に do not retain isKnown after subtitle annotation filtering.
|
||||||
|
- [x] #2 Lexical known-word tokens still render as known when not grammar-excluded.
|
||||||
|
- [x] #3 Focused regression test covers the particle false-positive path.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add a focused regression in `src/core/services/tokenizer/annotation-stage.test.ts` showing standalone particle `に` is grammar-excluded and does not retain `isKnown` even when `isKnownWord('に')` is true.
|
||||||
|
2. Run the focused tokenizer annotation test and confirm the new test fails for the current behavior.
|
||||||
|
3. Patch `src/core/services/tokenizer/annotation-stage.ts` so grammar-excluded tokens clear known status while still stripping N+1/frequency/JLPT/name metadata.
|
||||||
|
4. Run the focused test file, then inspect diff and update task acceptance criteria.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Implemented tokenizer annotation filtering so grammar-excluded subtitle tokens clear known-word status instead of retaining green known coloring. Added focused regression for known-word-cache particle false positive and updated existing expectations for unified annotation clearing. Verification: `bun test src/core/services/tokenizer/annotation-stage.test.ts --test-name-pattern "clears known status from standalone particles"` failed before the production patch; after patch, `bun test src/core/services/tokenizer/annotation-stage.test.ts`, `bun test src/core/services/tokenizer.test.ts`, combined tokenizer tests, `bun run typecheck`, `bun run changelog:lint`, and `bun run test:fast` passed.
|
||||||
|
|
||||||
|
Full handoff gate follow-up: `bun run test:env` and `bun run build` passed. `bun run test:smoke:dist` failed outside this tokenizer change in `dist/core/services/overlay-manager.test.js` because current dirty overlay-window code calls `window.getTitle()` on a test mock that does not provide it.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Summary:
|
||||||
|
- Cleared `isKnown` for grammar-excluded subtitle tokens in the tokenizer annotation stage, preventing standalone particles such as `に` from rendering as known just because a known-word deck contains a matching reading.
|
||||||
|
- Added a focused regression test for the known-word-cache false positive and updated tokenizer expectations so helper/grammar spans consistently clear all subtitle annotations.
|
||||||
|
- Added changelog fragment `changes/338-known-word-particle-highlights.md`.
|
||||||
|
|
||||||
|
Verification:
|
||||||
|
- `bun test src/core/services/tokenizer/annotation-stage.test.ts --test-name-pattern "clears known status from standalone particles"` failed before the production patch.
|
||||||
|
- `bun test src/core/services/tokenizer/annotation-stage.test.ts`
|
||||||
|
- `bun test src/core/services/tokenizer.test.ts`
|
||||||
|
- `bun test src/core/services/tokenizer/annotation-stage.test.ts src/core/services/tokenizer.test.ts`
|
||||||
|
- `bun run typecheck`
|
||||||
|
- `bun run changelog:lint`
|
||||||
|
- `bun run test:fast`
|
||||||
|
- `bun run test:env`
|
||||||
|
- `bun run build`
|
||||||
|
|
||||||
|
Blocked/External:
|
||||||
|
- `bun run test:smoke:dist` currently fails outside this tokenizer change in `dist/core/services/overlay-manager.test.js`: dirty overlay-window code calls `window.getTitle()` on a test mock without that method.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
+65
@@ -0,0 +1,65 @@
|
|||||||
|
---
|
||||||
|
id: TASK-340
|
||||||
|
title: Restore default replay and next subtitle overlay keybindings
|
||||||
|
status: Done
|
||||||
|
assignee:
|
||||||
|
- Codex
|
||||||
|
created_date: '2026-05-04 06:25'
|
||||||
|
updated_date: '2026-05-04 06:49'
|
||||||
|
labels:
|
||||||
|
- bug
|
||||||
|
- keybindings
|
||||||
|
- overlay
|
||||||
|
- mpv
|
||||||
|
dependencies: []
|
||||||
|
priority: high
|
||||||
|
---
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||||
|
Default overlay/mpv keybindings for replaying the current subtitle line and playing the next subtitle line are not firing. Shift+H and Shift+L subtitle jumps still work, but Ctrl+Shift+H should replay the current subtitle and pause at subtitle end, and Ctrl+Shift+L should play the next subtitle and pause at subtitle end. Keep the other built-in defaults working.
|
||||||
|
<!-- SECTION:DESCRIPTION:END -->
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
<!-- AC:BEGIN -->
|
||||||
|
- [x] #1 Default keybindings include working replay-current-subtitle and play-next-subtitle bindings on Ctrl+Shift+H and Ctrl+Shift+L.
|
||||||
|
- [x] #2 Replay-current-subtitle dispatch reaches the existing runtime path that pauses at the subtitle end.
|
||||||
|
- [x] #3 Play-next-subtitle dispatch reaches the existing runtime path that pauses at the subtitle end.
|
||||||
|
- [x] #4 Existing default keybindings continue to compile/register without regressions.
|
||||||
|
- [x] #5 Focused regression tests cover the broken default bindings.
|
||||||
|
<!-- AC:END -->
|
||||||
|
|
||||||
|
## Implementation Plan
|
||||||
|
|
||||||
|
<!-- SECTION:PLAN:BEGIN -->
|
||||||
|
1. Add focused regression coverage that the resolved defaults compile on Linux without dropping Ctrl+Shift+H/L, and that those keys map to replayCurrentSubtitle/playNextSubtitle session actions.
|
||||||
|
2. Move the default session-help shortcut off Ctrl/Cmd+Shift+H to a non-conflicting shortcut, then update generated/default config docs so shipped defaults match documentation.
|
||||||
|
3. Add/adjust coverage for default replay/next bindings and run targeted Bun tests plus plugin session-binding smoke.
|
||||||
|
|
||||||
|
4. Follow-up after live test: fix the mpv plugin shifted-letter key-name conversion so `Ctrl+Shift+KeyL` registers using mpv's uppercase letter form and add Lua regression coverage for both `Ctrl+Shift+L` and `Shift+L`.
|
||||||
|
<!-- SECTION:PLAN:END -->
|
||||||
|
|
||||||
|
## Implementation Notes
|
||||||
|
|
||||||
|
<!-- SECTION:NOTES:BEGIN -->
|
||||||
|
Root cause: default `shortcuts.openSessionHelp = CommandOrControl+Shift+H` canonicalized to `ctrl+shift+KeyH` on Linux/Windows, conflicting with the built-in replay-current-subtitle keybinding. The session-binding compiler drops conflicted bindings, so replay did not register. Moved default session help to `CommandOrControl+Slash` and added regression coverage that defaults compile without a conflict and keep replay/next actions on `Ctrl+Shift+H/L`.
|
||||||
|
|
||||||
|
Follow-up from live test: `Ctrl+Shift+H` works after resolving the help shortcut conflict, but `Ctrl+Shift+L` still behaves like native/other `Ctrl+L`. Investigating mpv/plugin key-name generation for shifted letter chords.
|
||||||
|
|
||||||
|
Follow-up fix: mpv normalizes shifted letter chords to uppercase letter key names (for example `Ctrl+Shift+l` becomes `Ctrl+L`). The plugin previously emitted `Ctrl+Shift+l`, which let a live `Ctrl+Shift+L` press fall through to the `Ctrl+L` key path. `plugin/subminer/session_bindings.lua` now emits uppercase letters and omits the Shift modifier for shifted `Key[A-Z]` bindings. Lua regression coverage now checks `Ctrl+Shift+KeyL -> Ctrl+L`, `Shift+KeyL -> L`, and the play-next CLI dispatch.
|
||||||
|
|
||||||
|
Second live follow-up: `Ctrl+Shift+L` routed to play-next but still behaved like `Shift+L` when playback was already paused because `MpvIpcClient.playNextSubtitle()` explicitly cleared `pendingPauseAtSubEnd` and only sent `sub-seek 1` in paused state. Changed play-next to always arm pause-at-sub-end, clear stale pause target, seek to next subtitle, and unpause when currently paused. Existing sub-end/time-pos handling then pauses at the next subtitle end.
|
||||||
|
<!-- SECTION:NOTES:END -->
|
||||||
|
|
||||||
|
## Final Summary
|
||||||
|
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||||
|
Changed the default session-help shortcut from `CommandOrControl+Shift+H` to `CommandOrControl+Slash` so `Ctrl+Shift+H` remains available for replay-current-subtitle and `Ctrl+Shift+L` remains available for play-next-subtitle. Updated config examples, docs-site shortcut/config/usage docs, and added changelog fragment `changes/340-default-subtitle-keybindings.md`.
|
||||||
|
|
||||||
|
Fixed both follow-up issues from live testing. First, the mpv plugin key-name converter now uses mpv's uppercase key form for shifted letter bindings (`Ctrl+Shift+KeyL` registers as `Ctrl+L`, `Shift+KeyL` as `L`). Second, `MpvIpcClient.playNextSubtitle()` now starts playback even when mpv is paused, keeps the pause-at-sub-end path armed, and lets existing subtitle-end timing pause again at the next subtitle end.
|
||||||
|
|
||||||
|
Regression coverage now includes compiled default bindings, Lua plugin shifted-letter registration/CLI dispatch, and paused-state play-next behavior.
|
||||||
|
|
||||||
|
Verification passed: targeted Bun session/mpv/protocol tests, `bun run test:plugin:src`, `bun run changelog:lint`, `bun run build`, and `bun run test:smoke:dist`. Earlier full gate also passed before the follow-ups: `bun run typecheck`, `bun run test:fast`, `bun run test:env`, docs/config checks, and dist smoke.
|
||||||
|
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
type: fixed
|
||||||
|
area: tokenizer
|
||||||
|
|
||||||
|
- Use Yomitan `wordClasses` metadata for subtitle POS filtering.
|
||||||
|
- Backfill blank MeCab POS detail fields during parser enrichment.
|
||||||
|
- Keep subtitle annotation metadata stripped from token results.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Fixed Hyprland fullscreen transitions so mpv fullscreen changes refresh visible overlay geometry, reassert topmost stacking, and keep primary subtitle hover pause working after resize/toggle cycles.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: tokenizer
|
||||||
|
|
||||||
|
- Stopped kana-only subtitle tokens from being selected as N+1 targets.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Restored persistent JLPT subtitle underlines while keeping hover JLPT labels and annotation color priority intact.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: shortcuts
|
||||||
|
|
||||||
|
- Accept follow-up number-row digits for multi-line subtitle mining even when the original shortcut modifiers are still held.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Suppressed subtitle annotation styling for standalone auxiliary inflection fragments such as `れる` and `れた` while keeping lexical `くれる` forms eligible for lookup metadata.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Suppressed subtitle annotation styling for grammar-only endings such as `じゃないですか` and standalone polite copula tails like `です` / `ですよ`.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: stats
|
||||||
|
|
||||||
|
- Kept regular app stats routing isolated from a separately running background stats daemon during playback startup.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Kept JLPT subtitle underlines at their JLPT color after dictionary lookups, even when the token also has another annotation color.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: tokenizer
|
||||||
|
|
||||||
|
- Tokenizer: Suppress annotations for ハァ-style interjection subtitles.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Refresh the current subtitle after successful card mining so newly known words recolor immediately.
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
type: fixed
|
||||||
|
area: tokenizer
|
||||||
|
|
||||||
|
- Tokenizer: Replaced hard-coded standalone grammar-ending permutations with shared pattern matching for polite copula, negative copula, and explanatory subtitle endings.
|
||||||
|
- Tokenizer: Kept grammar annotation exclusion logic in the shared subtitle filter and removed stale duplicate exclusion helpers from the annotation stage.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: anki
|
||||||
|
|
||||||
|
- Anki: Manual clipboard subtitle updates now preserve existing word audio while replacing sentence audio and animated-image media.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Kept the macOS visible overlay alive during transient mpv tracker flaps when the last tracked video geometry is still available.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: mpv
|
||||||
|
|
||||||
|
- mpv: Playlist navigation now reuses the running SubMiner overlay without repeating the pause-until-ready warmup gate.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Kept JLPT subtitle underlines on their JLPT color when lookup selection overlaps known-word or frequency annotation colors.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Fixed keyboard-only Yomitan popup controls so popup shortcuts take precedence over overlay keybindings like `j`.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: anilist
|
||||||
|
|
||||||
|
- AniList: Run post-watch progress checks on mpv time-position updates, read the fresh mpv position before threshold checks, wire manual mark-watched to force a progress sync, and fill missing `guessit` episode metadata from the filename parser.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: stats
|
||||||
|
|
||||||
|
- Restored stats startup deferral to a running background stats daemon so video launches no longer fail when the stats port is already in use.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: changed
|
||||||
|
area: stats
|
||||||
|
|
||||||
|
- Stats vocabulary exclusions now persist in the immersion database and import existing browser-local exclusions on first load.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Kept subtitle annotation prefetch running after immediate cache-hit renders so upcoming subtitle colors stay ready.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Fixed frequency highlighting for ordinal prefix-noun tokens like `第二` so popup ranks such as JPDB 1820 are preserved in subtitle annotations.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: tokenizer
|
||||||
|
|
||||||
|
- Suppressed N+1, JLPT, frequency, and name styling for `ある` / `有る` existence verbs while still allowing known-word highlighting.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: anilist
|
||||||
|
|
||||||
|
- AniList: Prevented duplicate post-watch writes during concurrent checks, preserved manual watched marks when post-watch sync fails, and kept known-word cache refresh notifications accurate after cache resets.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: anilist
|
||||||
|
|
||||||
|
- AniList: Kept config reload from opening the setup window during playback when token storage cannot be resolved, and stopped setup login from reporting success when encrypted token persistence fails.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Aligned the Hyprland fullscreen overlay with mpv when mpv reports client-requested fullscreen, force-applied exact Hyprland overlay window bounds after floating, disabled Hyprland floating-window decoration on exact overlay placement, compensated stats overlay placement for Electron/Wayland content insets, and made the stats overlay page/window opaque so mpv cannot show through transparent top insets.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Hid the browser focus outline on the top-level overlay surface so focused overlays no longer show a yellow/orange viewport border.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: anilist
|
||||||
|
|
||||||
|
- AniList: Retried Linux safeStorage availability after transient keyring startup failures so stored tokens can load and setup tokens can save once GNOME libsecret becomes available.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: tokenizer
|
||||||
|
|
||||||
|
- Prevented standalone grammar and helper tokens such as `に` from being colored as known words when readings from known-word decks match them.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Stopped Hyprland from pinning SubMiner overlay windows across workspaces while keeping floating placement for fullscreen alignment.
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
type: fixed
|
||||||
|
area: overlay
|
||||||
|
|
||||||
|
- Overlay: Fixed the default replay/next subtitle keybindings by moving the session-help shortcut to `Ctrl/Cmd+/`, leaving `Ctrl+Shift+H` and `Ctrl+Shift+L` free for subtitle playback controls. The mpv plugin now registers shifted letter chords with mpv's uppercase key form so `Ctrl+Shift+L` reaches the play-next-subtitle action instead of falling through as `Ctrl+L`, and play-next now starts playback from a paused state before pausing again at the subtitle end.
|
||||||
@@ -175,7 +175,7 @@
|
|||||||
"openCharacterDictionary": "CommandOrControl+Alt+A", // Open character dictionary setting.
|
"openCharacterDictionary": "CommandOrControl+Alt+A", // Open character dictionary setting.
|
||||||
"openRuntimeOptions": "CommandOrControl+Shift+O", // Open runtime options setting.
|
"openRuntimeOptions": "CommandOrControl+Shift+O", // Open runtime options setting.
|
||||||
"openJimaku": "Ctrl+Shift+J", // Open jimaku setting.
|
"openJimaku": "Ctrl+Shift+J", // Open jimaku setting.
|
||||||
"openSessionHelp": "CommandOrControl+Shift+H", // Open session help setting.
|
"openSessionHelp": "CommandOrControl+Slash", // Open session help setting.
|
||||||
"openControllerSelect": "Alt+C", // Open controller select setting.
|
"openControllerSelect": "Alt+C", // Open controller select setting.
|
||||||
"openControllerDebug": "Alt+Shift+C", // Open controller debug setting.
|
"openControllerDebug": "Alt+Shift+C", // Open controller debug setting.
|
||||||
"toggleSubtitleSidebar": "Backslash" // Toggle subtitle sidebar setting.
|
"toggleSubtitleSidebar": "Backslash" // Toggle subtitle sidebar setting.
|
||||||
|
|||||||
@@ -213,7 +213,7 @@ Animated AVIF requires an AV1 encoder (`libaom-av1`, `libsvtav1`, or `librav1e`)
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
`overwriteAudio` applies to automatic card updates and duplicate-card enrichment. Manual clipboard subtitle updates (`Ctrl/Cmd+C`, then `Ctrl/Cmd+V`) always replace generated audio in both the expression audio field and sentence audio field.
|
`overwriteAudio` applies to automatic card updates and duplicate-card enrichment. Manual clipboard subtitle updates (`Ctrl/Cmd+C`, then `Ctrl/Cmd+V`) always replace generated sentence audio, while leaving the word audio field unchanged.
|
||||||
|
|
||||||
## AI Translation
|
## AI Translation
|
||||||
|
|
||||||
|
|||||||
@@ -537,7 +537,7 @@ See `config.example.jsonc` for detailed configuration options.
|
|||||||
"markAudioCard": "CommandOrControl+Shift+A",
|
"markAudioCard": "CommandOrControl+Shift+A",
|
||||||
"openCharacterDictionary": "CommandOrControl+Alt+A",
|
"openCharacterDictionary": "CommandOrControl+Alt+A",
|
||||||
"openRuntimeOptions": "CommandOrControl+Shift+O",
|
"openRuntimeOptions": "CommandOrControl+Shift+O",
|
||||||
"openSessionHelp": "CommandOrControl+Shift+H",
|
"openSessionHelp": "CommandOrControl+Slash",
|
||||||
"openControllerSelect": "Alt+C",
|
"openControllerSelect": "Alt+C",
|
||||||
"openControllerDebug": "Alt+Shift+C",
|
"openControllerDebug": "Alt+Shift+C",
|
||||||
"openJimaku": "Ctrl+Shift+J",
|
"openJimaku": "Ctrl+Shift+J",
|
||||||
@@ -562,7 +562,7 @@ See `config.example.jsonc` for detailed configuration options.
|
|||||||
| `markAudioCard` | string \| `null` | Accelerator for marking last card as audio card (default: `"CommandOrControl+Shift+A"`) |
|
| `markAudioCard` | string \| `null` | Accelerator for marking last card as audio card (default: `"CommandOrControl+Shift+A"`) |
|
||||||
| `openCharacterDictionary` | string \| `null` | Opens the character dictionary AniList selector (default: `"CommandOrControl+Alt+A"`) |
|
| `openCharacterDictionary` | string \| `null` | Opens the character dictionary AniList selector (default: `"CommandOrControl+Alt+A"`) |
|
||||||
| `openRuntimeOptions` | string \| `null` | Opens runtime options palette for live session-only toggles (default: `"CommandOrControl+Shift+O"`) |
|
| `openRuntimeOptions` | string \| `null` | Opens runtime options palette for live session-only toggles (default: `"CommandOrControl+Shift+O"`) |
|
||||||
| `openSessionHelp` | string \| `null` | Opens the in-overlay session help modal (default: `"CommandOrControl+Shift+H"`) |
|
| `openSessionHelp` | string \| `null` | Opens the in-overlay session help modal (default: `"CommandOrControl+Slash"`) |
|
||||||
| `openControllerSelect` | string \| `null` | Opens the controller config/remap modal (default: `"Alt+C"`) |
|
| `openControllerSelect` | string \| `null` | Opens the controller config/remap modal (default: `"Alt+C"`) |
|
||||||
| `openControllerDebug` | string \| `null` | Opens the controller debug modal (default: `"Alt+Shift+C"`) |
|
| `openControllerDebug` | string \| `null` | Opens the controller debug modal (default: `"Alt+Shift+C"`) |
|
||||||
| `openJimaku` | string \| `null` | Opens the Jimaku search modal (default: `"Ctrl+Shift+J"`) |
|
| `openJimaku` | string \| `null` | Opens the Jimaku search modal (default: `"Ctrl+Shift+J"`) |
|
||||||
@@ -706,7 +706,7 @@ These shortcuts are only active when the overlay window is visible and automatic
|
|||||||
|
|
||||||
### Session Help Modal
|
### Session Help Modal
|
||||||
|
|
||||||
The session help modal opens from the overlay with `Ctrl/Cmd+Shift+H` by default. The mpv plugin also exposes it through the `Y-H` chord (falling back to `Y-K` if needed). It shows the current session keybindings and color legend.
|
The session help modal opens from the overlay with `Ctrl/Cmd+/` by default. The mpv plugin also exposes it through the `Y-H` chord (falling back to `Y-K` if needed). It shows the current session keybindings and color legend.
|
||||||
|
|
||||||
You can filter the modal quickly with `/`:
|
You can filter the modal quickly with `/`:
|
||||||
|
|
||||||
@@ -893,7 +893,7 @@ This example is intentionally compact. The option table below documents availabl
|
|||||||
| `media.audioPadding` | number (seconds) | Padding around audio clip timing (default: `0.5`) |
|
| `media.audioPadding` | number (seconds) | Padding around audio clip timing (default: `0.5`) |
|
||||||
| `media.fallbackDuration` | number (seconds) | Default duration if timing unavailable (default: `3.0`) |
|
| `media.fallbackDuration` | number (seconds) | Default duration if timing unavailable (default: `3.0`) |
|
||||||
| `media.maxMediaDuration` | number (seconds) | Max duration for generated media from multi-line copy (default: `30`, `0` to disable) |
|
| `media.maxMediaDuration` | number (seconds) | Max duration for generated media from multi-line copy (default: `30`, `0` to disable) |
|
||||||
| `behavior.overwriteAudio` | `true`, `false` | Replace existing audio on updates; when `false`, new audio is appended/prepended per `behavior.mediaInsertMode`; manual clipboard updates always replace generated audio (default: `true`) |
|
| `behavior.overwriteAudio` | `true`, `false` | Replace existing audio on updates; when `false`, new audio is appended/prepended per `behavior.mediaInsertMode`; manual clipboard updates always replace generated sentence audio (default: `true`) |
|
||||||
| `behavior.overwriteImage` | `true`, `false` | Replace existing images on updates; when `false`, new images are appended/prepended per `behavior.mediaInsertMode` (default: `true`) |
|
| `behavior.overwriteImage` | `true`, `false` | Replace existing images on updates; when `false`, new images are appended/prepended per `behavior.mediaInsertMode` (default: `true`) |
|
||||||
| `behavior.mediaInsertMode` | `"append"`, `"prepend"` | Where to insert new media when overwrite is off (default: `"append"`) |
|
| `behavior.mediaInsertMode` | `"append"`, `"prepend"` | Where to insert new media when overwrite is off (default: `"append"`) |
|
||||||
| `behavior.highlightWord` | `true`, `false` | Highlight the word in sentence context (default: `true`) |
|
| `behavior.highlightWord` | `true`, `false` | Highlight the word in sentence context (default: `true`) |
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ Secondary subtitle text (typically English translations) is stored alongside pri
|
|||||||
|
|
||||||
### Word Exclusion List
|
### Word Exclusion List
|
||||||
|
|
||||||
The Vocabulary tab toolbar includes an **Exclusions** button for hiding words from all vocabulary views. Excluded words are stored in browser localStorage and can be managed (restored or cleared) from the exclusion modal. Exclusions affect stat cards, charts, the frequency rank table, and the word list.
|
The Vocabulary tab toolbar includes an **Exclusions** button for hiding words from all vocabulary views. Excluded words are stored in the immersion database, with older browser localStorage exclusions imported on first load after upgrade. They can be managed (restored or cleared) from the exclusion modal. Exclusions affect stat cards, charts, the frequency rank table, and the word list.
|
||||||
|
|
||||||
## Retention Defaults
|
## Retention Defaults
|
||||||
|
|
||||||
|
|||||||
@@ -100,7 +100,7 @@ If you prefer a hands-on approach (animecards-style), you can copy the current s
|
|||||||
- For multiple lines: press `Ctrl/Cmd+Shift+C`, then a digit `1`–`9` to select how many recent subtitle lines to combine. The combined text is copied to the clipboard.
|
- For multiple lines: press `Ctrl/Cmd+Shift+C`, then a digit `1`–`9` to select how many recent subtitle lines to combine. The combined text is copied to the clipboard.
|
||||||
3. Press `Ctrl/Cmd+V` to update the last-added card with the clipboard contents plus audio, image, and translation — the same fields auto-update would fill.
|
3. Press `Ctrl/Cmd+V` to update the last-added card with the clipboard contents plus audio, image, and translation — the same fields auto-update would fill.
|
||||||
|
|
||||||
Manual clipboard updates always replace generated audio in both the expression audio field and sentence audio field, even when `ankiConnect.behavior.overwriteAudio` is disabled. The manual flow assumes you are intentionally replacing the proxy-generated clip on the newest card.
|
Manual clipboard updates always replace generated sentence audio, even when `ankiConnect.behavior.overwriteAudio` is disabled. The word audio field is left unchanged because the word itself does not change in this flow.
|
||||||
|
|
||||||
This is useful when auto-update is disabled or when you want explicit control over which subtitle line gets attached to the card.
|
This is useful when auto-update is disabled or when you want explicit control over which subtitle line gets attached to the card.
|
||||||
|
|
||||||
|
|||||||
@@ -175,7 +175,7 @@
|
|||||||
"openCharacterDictionary": "CommandOrControl+Alt+A", // Open character dictionary setting.
|
"openCharacterDictionary": "CommandOrControl+Alt+A", // Open character dictionary setting.
|
||||||
"openRuntimeOptions": "CommandOrControl+Shift+O", // Open runtime options setting.
|
"openRuntimeOptions": "CommandOrControl+Shift+O", // Open runtime options setting.
|
||||||
"openJimaku": "Ctrl+Shift+J", // Open jimaku setting.
|
"openJimaku": "Ctrl+Shift+J", // Open jimaku setting.
|
||||||
"openSessionHelp": "CommandOrControl+Shift+H", // Open session help setting.
|
"openSessionHelp": "CommandOrControl+Slash", // Open session help setting.
|
||||||
"openControllerSelect": "Alt+C", // Open controller select setting.
|
"openControllerSelect": "Alt+C", // Open controller select setting.
|
||||||
"openControllerDebug": "Alt+Shift+C", // Open controller debug setting.
|
"openControllerDebug": "Alt+Shift+C", // Open controller debug setting.
|
||||||
"toggleSubtitleSidebar": "Backslash" // Toggle subtitle sidebar setting.
|
"toggleSubtitleSidebar": "Backslash" // Toggle subtitle sidebar setting.
|
||||||
|
|||||||
@@ -69,7 +69,7 @@ Mouse-hover playback behavior is configured separately from shortcuts: `subtitle
|
|||||||
| `Ctrl/Cmd+Shift+V` | Cycle secondary subtitle mode (hidden → visible → hover) | `shortcuts.toggleSecondarySub` |
|
| `Ctrl/Cmd+Shift+V` | Cycle secondary subtitle mode (hidden → visible → hover) | `shortcuts.toggleSecondarySub` |
|
||||||
| `Ctrl/Cmd+Alt+A` | Open character dictionary AniList selector | `shortcuts.openCharacterDictionary` |
|
| `Ctrl/Cmd+Alt+A` | Open character dictionary AniList selector | `shortcuts.openCharacterDictionary` |
|
||||||
| `Ctrl/Cmd+Shift+O` | Open runtime options palette | `shortcuts.openRuntimeOptions` |
|
| `Ctrl/Cmd+Shift+O` | Open runtime options palette | `shortcuts.openRuntimeOptions` |
|
||||||
| `Ctrl/Cmd+Shift+H` | Open session help modal | `shortcuts.openSessionHelp` |
|
| `Ctrl/Cmd+/` | Open session help modal | `shortcuts.openSessionHelp` |
|
||||||
| `Ctrl+Shift+J` | Open Jimaku subtitle search modal | `shortcuts.openJimaku` |
|
| `Ctrl+Shift+J` | Open Jimaku subtitle search modal | `shortcuts.openJimaku` |
|
||||||
| `Ctrl+Alt+C` | Open the manual YouTube subtitle picker | `keybindings` |
|
| `Ctrl+Alt+C` | Open the manual YouTube subtitle picker | `keybindings` |
|
||||||
| `Ctrl+Alt+S` | Open subtitle sync (subsync) modal | `shortcuts.triggerSubsync` |
|
| `Ctrl+Alt+S` | Open subtitle sync (subsync) modal | `shortcuts.triggerSubsync` |
|
||||||
|
|||||||
@@ -324,6 +324,10 @@ Add a `pass` rule for each global shortcut you configure. The defaults are `Alt+
|
|||||||
|
|
||||||
Without these rules, Hyprland intercepts the keypresses before they reach SubMiner, and the shortcuts silently do nothing.
|
Without these rules, Hyprland intercepts the keypresses before they reach SubMiner, and the shortcuts silently do nothing.
|
||||||
|
|
||||||
|
**Overlay stays behind mpv after fullscreen**
|
||||||
|
|
||||||
|
SubMiner watches mpv's `fullscreen` property and refreshes the overlay geometry when it changes. If the overlay still does not move or rise above fullscreen mpv, confirm that the mpv IPC socket is connected and that `hyprctl -j clients` and `hyprctl -j monitors` work from the same environment that launched SubMiner.
|
||||||
|
|
||||||
For more details, see the Hyprland docs on [global keybinds](https://wiki.hypr.land/Configuring/Binds/#global-keybinds) and [window rules](https://wiki.hypr.land/Configuring/Window-Rules/).
|
For more details, see the Hyprland docs on [global keybinds](https://wiki.hypr.land/Configuring/Binds/#global-keybinds) and [window rules](https://wiki.hypr.land/Configuring/Window-Rules/).
|
||||||
|
|
||||||
### macOS
|
### macOS
|
||||||
|
|||||||
+1
-1
@@ -334,7 +334,7 @@ Useful overlay-local default keybinding: `Ctrl+Alt+P` opens the playlist browser
|
|||||||
|
|
||||||
Press `V` to hide or restore the primary SubMiner subtitle bar. The mpv plugin also binds bare `v` to the same action, overriding mpv's native primary subtitle visibility toggle.
|
Press `V` to hide or restore the primary SubMiner subtitle bar. The mpv plugin also binds bare `v` to the same action, overriding mpv's native primary subtitle visibility toggle.
|
||||||
|
|
||||||
`Ctrl/Cmd+Shift+H` opens the session help modal with the current overlay and mpv keybindings. If you use the mpv plugin, the same help view is also available through the `y-h` chord.
|
`Ctrl/Cmd+/` opens the session help modal with the current overlay and mpv keybindings. If you use the mpv plugin, the same help view is also available through the `y-h` chord.
|
||||||
|
|
||||||
Hovering over subtitle text pauses mpv by default; leaving resumes it. Yomitan popups also pause playback by default. Set `subtitleStyle.autoPauseVideoOnHover: false` or `subtitleStyle.autoPauseVideoOnYomitanPopup: false` to disable either behavior.
|
Hovering over subtitle text pauses mpv by default; leaving resumes it. Yomitan popups also pause playback by default. Set `subtitleStyle.autoPauseVideoOnHover: false` or `subtitleStyle.autoPauseVideoOnYomitanPopup: false` to disable either behavior.
|
||||||
|
|
||||||
|
|||||||
+2
-2
@@ -45,10 +45,10 @@
|
|||||||
"test:config:src": "bun test src/config/config.test.ts src/config/path-resolution.test.ts src/config/resolve/anki-connect.test.ts src/config/resolve/integrations.test.ts src/config/resolve/subtitle-style.test.ts src/config/resolve/jellyfin.test.ts src/config/definitions/domain-registry.test.ts src/generate-config-example.test.ts src/verify-config-example.test.ts",
|
"test:config:src": "bun test src/config/config.test.ts src/config/path-resolution.test.ts src/config/resolve/anki-connect.test.ts src/config/resolve/integrations.test.ts src/config/resolve/subtitle-style.test.ts src/config/resolve/jellyfin.test.ts src/config/definitions/domain-registry.test.ts src/generate-config-example.test.ts src/verify-config-example.test.ts",
|
||||||
"test:config:dist": "bun test dist/config/config.test.js dist/config/path-resolution.test.js dist/config/resolve/anki-connect.test.js dist/config/resolve/integrations.test.js dist/config/resolve/subtitle-style.test.js dist/config/resolve/jellyfin.test.js dist/config/definitions/domain-registry.test.js dist/generate-config-example.test.js dist/verify-config-example.test.js",
|
"test:config:dist": "bun test dist/config/config.test.js dist/config/path-resolution.test.js dist/config/resolve/anki-connect.test.js dist/config/resolve/integrations.test.js dist/config/resolve/subtitle-style.test.js dist/config/resolve/jellyfin.test.js dist/config/definitions/domain-registry.test.js dist/generate-config-example.test.js dist/verify-config-example.test.js",
|
||||||
"test:config:smoke:dist": "bun test dist/config/path-resolution.test.js",
|
"test:config:smoke:dist": "bun test dist/config/path-resolution.test.js",
|
||||||
"test:plugin:src": "lua scripts/test-plugin-lua-compat.lua && lua scripts/test-plugin-start-gate.lua && lua scripts/test-plugin-binary-windows.lua",
|
"test:plugin:src": "lua scripts/test-plugin-lua-compat.lua && lua scripts/test-plugin-start-gate.lua && lua scripts/test-plugin-session-bindings.lua && lua scripts/test-plugin-binary-windows.lua",
|
||||||
"test:launcher:smoke:src": "bun test launcher/smoke.e2e.test.ts",
|
"test:launcher:smoke:src": "bun test launcher/smoke.e2e.test.ts",
|
||||||
"test:launcher:src": "bun test launcher/config.test.ts launcher/config-domain-parsers.test.ts launcher/config/cli-parser-builder.test.ts launcher/config/args-normalizer.test.ts launcher/mpv.test.ts launcher/picker.test.ts launcher/parse-args.test.ts launcher/main.test.ts launcher/commands/command-modules.test.ts launcher/smoke.e2e.test.ts && bun run test:plugin:src",
|
"test:launcher:src": "bun test launcher/config.test.ts launcher/config-domain-parsers.test.ts launcher/config/cli-parser-builder.test.ts launcher/config/args-normalizer.test.ts launcher/mpv.test.ts launcher/picker.test.ts launcher/parse-args.test.ts launcher/main.test.ts launcher/commands/command-modules.test.ts launcher/smoke.e2e.test.ts && bun run test:plugin:src",
|
||||||
"test:core:src": "bun test src/cli/args.test.ts src/cli/help.test.ts src/shared/setup-state.test.ts src/core/services/cli-command.test.ts src/core/services/field-grouping-overlay.test.ts src/core/services/numeric-shortcut-session.test.ts src/core/services/secondary-subtitle.test.ts src/core/services/mpv-render-metrics.test.ts src/core/services/overlay-content-measurement.test.ts src/core/services/mpv-control.test.ts src/core/services/mpv.test.ts src/core/services/runtime-options-ipc.test.ts src/core/services/runtime-config.test.ts src/core/services/yomitan-extension-paths.test.ts src/core/services/config-hot-reload.test.ts src/core/services/discord-presence.test.ts src/core/services/tokenizer.test.ts src/core/services/tokenizer/annotation-stage.test.ts src/core/services/tokenizer/parser-selection-stage.test.ts src/core/services/tokenizer/parser-enrichment-stage.test.ts src/core/services/subsync.test.ts src/core/services/overlay-bridge.test.ts src/core/services/overlay-shortcut-handler.test.ts src/core/services/stats-window.test.ts src/main/runtime/stats-server-routing.test.ts src/core/services/mining.test.ts src/core/services/anki-jimaku.test.ts src/core/services/jimaku-download-path.test.ts src/core/services/jellyfin.test.ts src/core/services/jellyfin-remote.test.ts src/core/services/immersion-tracker-service.test.ts src/core/services/overlay-runtime-init.test.ts src/core/services/app-ready.test.ts src/core/services/startup-bootstrap.test.ts src/core/services/subtitle-processing-controller.test.ts src/core/services/anilist/anilist-update-queue.test.ts src/core/services/anilist/rate-limiter.test.ts src/core/services/jlpt-token-filter.test.ts src/core/services/subtitle-position.test.ts src/core/utils/shortcut-config.test.ts src/main/runtime/first-run-setup-plugin.test.ts src/main/runtime/first-run-setup-service.test.ts src/main/runtime/first-run-setup-window.test.ts src/main/runtime/tray-runtime.test.ts src/main/runtime/tray-main-actions.test.ts 
src/main/runtime/tray-main-deps.test.ts src/main/runtime/tray-runtime-handlers.test.ts src/main/runtime/cli-command-context-main-deps.test.ts src/main/runtime/app-ready-main-deps.test.ts src/renderer/error-recovery.test.ts src/renderer/subtitle-render.test.ts src/renderer/handlers/mouse.test.ts src/renderer/handlers/keyboard.test.ts src/renderer/modals/jimaku.test.ts src/subsync/utils.test.ts src/main/anilist-url-guard.test.ts src/window-trackers/hyprland-tracker.test.ts src/window-trackers/x11-tracker.test.ts src/window-trackers/windows-helper.test.ts src/window-trackers/windows-tracker.test.ts launcher/config.test.ts launcher/config-domain-parsers.test.ts launcher/config/cli-parser-builder.test.ts launcher/config/args-normalizer.test.ts launcher/parse-args.test.ts launcher/main.test.ts launcher/commands/command-modules.test.ts launcher/setup-gate.test.ts stats/src/lib/api-client.test.ts",
|
"test:core:src": "bun test src/cli/args.test.ts src/cli/help.test.ts src/shared/setup-state.test.ts src/core/services/cli-command.test.ts src/core/services/field-grouping-overlay.test.ts src/core/services/numeric-shortcut-session.test.ts src/core/services/secondary-subtitle.test.ts src/core/services/mpv-render-metrics.test.ts src/core/services/overlay-content-measurement.test.ts src/core/services/mpv-control.test.ts src/core/services/mpv.test.ts src/core/services/runtime-options-ipc.test.ts src/core/services/runtime-config.test.ts src/core/services/yomitan-extension-paths.test.ts src/core/services/config-hot-reload.test.ts src/core/services/discord-presence.test.ts src/core/services/tokenizer.test.ts src/core/services/tokenizer/annotation-stage.test.ts src/core/services/tokenizer/parser-selection-stage.test.ts src/core/services/tokenizer/parser-enrichment-stage.test.ts src/core/services/subsync.test.ts src/core/services/overlay-bridge.test.ts src/core/services/overlay-shortcut-handler.test.ts src/core/services/stats-window.test.ts src/core/services/__tests__/stats-server.test.ts src/main/runtime/stats-server-routing.test.ts src/core/services/mining.test.ts src/core/services/anki-jimaku.test.ts src/core/services/jimaku-download-path.test.ts src/core/services/jellyfin.test.ts src/core/services/jellyfin-remote.test.ts src/core/services/immersion-tracker-service.test.ts src/core/services/overlay-runtime-init.test.ts src/core/services/app-ready.test.ts src/core/services/startup-bootstrap.test.ts src/core/services/subtitle-processing-controller.test.ts src/core/services/anilist/anilist-update-queue.test.ts src/core/services/anilist/rate-limiter.test.ts src/core/services/jlpt-token-filter.test.ts src/core/services/subtitle-position.test.ts src/core/utils/shortcut-config.test.ts src/main/runtime/first-run-setup-plugin.test.ts src/main/runtime/first-run-setup-service.test.ts src/main/runtime/first-run-setup-window.test.ts src/main/runtime/tray-runtime.test.ts 
src/main/runtime/tray-main-actions.test.ts src/main/runtime/tray-main-deps.test.ts src/main/runtime/tray-runtime-handlers.test.ts src/main/runtime/cli-command-context-main-deps.test.ts src/main/runtime/app-ready-main-deps.test.ts src/renderer/error-recovery.test.ts src/renderer/subtitle-render.test.ts src/renderer/handlers/mouse.test.ts src/renderer/handlers/keyboard.test.ts src/renderer/modals/jimaku.test.ts src/subsync/utils.test.ts src/main/anilist-url-guard.test.ts src/window-trackers/hyprland-tracker.test.ts src/window-trackers/x11-tracker.test.ts src/window-trackers/windows-helper.test.ts src/window-trackers/windows-tracker.test.ts launcher/config.test.ts launcher/config-domain-parsers.test.ts launcher/config/cli-parser-builder.test.ts launcher/config/args-normalizer.test.ts launcher/parse-args.test.ts launcher/main.test.ts launcher/commands/command-modules.test.ts launcher/setup-gate.test.ts stats/src/lib/api-client.test.ts stats/src/hooks/useExcludedWords.test.ts stats/src/styles/globals.test.ts",
|
||||||
"test:core:dist": "bun test dist/cli/args.test.js dist/cli/help.test.js dist/core/services/cli-command.test.js dist/core/services/ipc.test.js dist/core/services/anki-jimaku-ipc.test.js dist/core/services/field-grouping-overlay.test.js dist/core/services/numeric-shortcut-session.test.js dist/core/services/secondary-subtitle.test.js dist/core/services/mpv-render-metrics.test.js dist/core/services/overlay-content-measurement.test.js dist/core/services/mpv-control.test.js dist/core/services/mpv.test.js dist/core/services/runtime-options-ipc.test.js dist/core/services/runtime-config.test.js dist/core/services/yomitan-extension-paths.test.js dist/core/services/config-hot-reload.test.js dist/core/services/discord-presence.test.js dist/core/services/tokenizer.test.js dist/core/services/tokenizer/annotation-stage.test.js dist/core/services/tokenizer/parser-selection-stage.test.js dist/core/services/tokenizer/parser-enrichment-stage.test.js dist/core/services/subsync.test.js dist/core/services/overlay-bridge.test.js dist/core/services/overlay-manager.test.js dist/core/services/overlay-shortcut-handler.test.js dist/core/services/mining.test.js dist/core/services/anki-jimaku.test.js dist/core/services/jimaku-download-path.test.js dist/core/services/jellyfin.test.js dist/core/services/jellyfin-remote.test.js dist/core/services/immersion-tracker-service.test.js dist/core/services/overlay-runtime-init.test.js dist/core/services/app-ready.test.js dist/core/services/startup-bootstrap.test.js dist/core/services/subtitle-processing-controller.test.js dist/core/services/anilist/anilist-token-store.test.js dist/core/services/anilist/anilist-update-queue.test.js dist/core/services/anilist/rate-limiter.test.js dist/core/services/jlpt-token-filter.test.js dist/core/services/subtitle-position.test.js dist/renderer/error-recovery.test.js dist/renderer/subtitle-render.test.js dist/renderer/handlers/mouse.test.js dist/renderer/handlers/keyboard.test.js dist/renderer/modals/jimaku.test.js 
dist/subsync/utils.test.js dist/main/anilist-url-guard.test.js dist/window-trackers/hyprland-tracker.test.js dist/window-trackers/x11-tracker.test.js dist/window-trackers/windows-helper.test.js dist/window-trackers/windows-tracker.test.js",
|
"test:core:dist": "bun test dist/cli/args.test.js dist/cli/help.test.js dist/core/services/cli-command.test.js dist/core/services/ipc.test.js dist/core/services/anki-jimaku-ipc.test.js dist/core/services/field-grouping-overlay.test.js dist/core/services/numeric-shortcut-session.test.js dist/core/services/secondary-subtitle.test.js dist/core/services/mpv-render-metrics.test.js dist/core/services/overlay-content-measurement.test.js dist/core/services/mpv-control.test.js dist/core/services/mpv.test.js dist/core/services/runtime-options-ipc.test.js dist/core/services/runtime-config.test.js dist/core/services/yomitan-extension-paths.test.js dist/core/services/config-hot-reload.test.js dist/core/services/discord-presence.test.js dist/core/services/tokenizer.test.js dist/core/services/tokenizer/annotation-stage.test.js dist/core/services/tokenizer/parser-selection-stage.test.js dist/core/services/tokenizer/parser-enrichment-stage.test.js dist/core/services/subsync.test.js dist/core/services/overlay-bridge.test.js dist/core/services/overlay-manager.test.js dist/core/services/overlay-shortcut-handler.test.js dist/core/services/mining.test.js dist/core/services/anki-jimaku.test.js dist/core/services/jimaku-download-path.test.js dist/core/services/jellyfin.test.js dist/core/services/jellyfin-remote.test.js dist/core/services/immersion-tracker-service.test.js dist/core/services/overlay-runtime-init.test.js dist/core/services/app-ready.test.js dist/core/services/startup-bootstrap.test.js dist/core/services/subtitle-processing-controller.test.js dist/core/services/anilist/anilist-token-store.test.js dist/core/services/anilist/anilist-update-queue.test.js dist/core/services/anilist/rate-limiter.test.js dist/core/services/jlpt-token-filter.test.js dist/core/services/subtitle-position.test.js dist/renderer/error-recovery.test.js dist/renderer/subtitle-render.test.js dist/renderer/handlers/mouse.test.js dist/renderer/handlers/keyboard.test.js dist/renderer/modals/jimaku.test.js 
dist/subsync/utils.test.js dist/main/anilist-url-guard.test.js dist/window-trackers/hyprland-tracker.test.js dist/window-trackers/x11-tracker.test.js dist/window-trackers/windows-helper.test.js dist/window-trackers/windows-tracker.test.js",
|
||||||
"test:core:smoke:dist": "bun test dist/cli/help.test.js dist/core/services/runtime-config.test.js dist/core/services/ipc.test.js dist/core/services/overlay-manager.test.js dist/core/services/anilist/anilist-token-store.test.js dist/core/services/startup-bootstrap.test.js dist/renderer/error-recovery.test.js dist/main/anilist-url-guard.test.js dist/window-trackers/x11-tracker.test.js",
|
"test:core:smoke:dist": "bun test dist/cli/help.test.js dist/core/services/runtime-config.test.js dist/core/services/ipc.test.js dist/core/services/overlay-manager.test.js dist/core/services/anilist/anilist-token-store.test.js dist/core/services/startup-bootstrap.test.js dist/renderer/error-recovery.test.js dist/main/anilist-url-guard.test.js dist/window-trackers/x11-tracker.test.js",
|
||||||
"test:smoke:dist": "bun run test:config:smoke:dist && bun run test:core:smoke:dist",
|
"test:smoke:dist": "bun run test:config:smoke:dist && bun run test:core:smoke:dist",
|
||||||
|
|||||||
@@ -83,11 +83,17 @@ function M.create(ctx)
|
|||||||
return
|
return
|
||||||
end
|
end
|
||||||
|
|
||||||
aniskip.clear_aniskip_state()
|
|
||||||
process.disarm_auto_play_ready_gate()
|
|
||||||
local has_matching_socket = rearm_managed_subtitle_defaults()
|
|
||||||
|
|
||||||
local should_auto_start = resolve_auto_start_enabled()
|
local should_auto_start = resolve_auto_start_enabled()
|
||||||
|
local has_matching_socket = process.has_matching_mpv_ipc_socket(opts.socket_path)
|
||||||
|
local preserve_active_auto_start_gate = (
|
||||||
|
state.overlay_running and state.auto_play_ready_gate_armed and should_auto_start and has_matching_socket
|
||||||
|
)
|
||||||
|
aniskip.clear_aniskip_state()
|
||||||
|
if not preserve_active_auto_start_gate then
|
||||||
|
process.disarm_auto_play_ready_gate()
|
||||||
|
end
|
||||||
|
has_matching_socket = rearm_managed_subtitle_defaults()
|
||||||
|
|
||||||
if should_auto_start then
|
if should_auto_start then
|
||||||
if not has_matching_socket then
|
if not has_matching_socket then
|
||||||
subminer_log(
|
subminer_log(
|
||||||
|
|||||||
@@ -299,14 +299,7 @@ function M.create(ctx)
|
|||||||
if overrides.auto_start_trigger == true then
|
if overrides.auto_start_trigger == true then
|
||||||
subminer_log("debug", "process", "Auto-start ignored because overlay is already running")
|
subminer_log("debug", "process", "Auto-start ignored because overlay is already running")
|
||||||
local socket_path = overrides.socket_path or opts.socket_path
|
local socket_path = overrides.socket_path or opts.socket_path
|
||||||
local should_pause_until_ready = (
|
if not state.auto_play_ready_gate_armed then
|
||||||
resolve_visible_overlay_startup()
|
|
||||||
and resolve_pause_until_ready()
|
|
||||||
and has_matching_mpv_ipc_socket(socket_path)
|
|
||||||
)
|
|
||||||
if should_pause_until_ready then
|
|
||||||
arm_auto_play_ready_gate()
|
|
||||||
else
|
|
||||||
disarm_auto_play_ready_gate()
|
disarm_auto_play_ready_gate()
|
||||||
end
|
end
|
||||||
local visibility_action = resolve_visible_overlay_startup()
|
local visibility_action = resolve_visible_overlay_startup()
|
||||||
|
|||||||
@@ -96,16 +96,30 @@ function M.create(ctx)
|
|||||||
return nil
|
return nil
|
||||||
end
|
end
|
||||||
|
|
||||||
|
local shifted_letter = key.code:match("^Key([A-Z])$")
|
||||||
|
local has_shift = false
|
||||||
|
for _, modifier in ipairs(key.modifiers) do
|
||||||
|
if modifier == "shift" then
|
||||||
|
has_shift = true
|
||||||
|
break
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
local key_name = key_code_to_mpv_name(key.code)
|
local key_name = key_code_to_mpv_name(key.code)
|
||||||
|
if shifted_letter and has_shift then
|
||||||
|
key_name = shifted_letter
|
||||||
|
end
|
||||||
if not key_name then
|
if not key_name then
|
||||||
return nil
|
return nil
|
||||||
end
|
end
|
||||||
|
|
||||||
local parts = {}
|
local parts = {}
|
||||||
for _, modifier in ipairs(key.modifiers) do
|
for _, modifier in ipairs(key.modifiers) do
|
||||||
local mapped = MODIFIER_MAP[modifier]
|
if not (modifier == "shift" and shifted_letter) then
|
||||||
if mapped then
|
local mapped = MODIFIER_MAP[modifier]
|
||||||
parts[#parts + 1] = mapped
|
if mapped then
|
||||||
|
parts[#parts + 1] = mapped
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
parts[#parts + 1] = key_name
|
parts[#parts + 1] = key_name
|
||||||
@@ -225,16 +239,39 @@ function M.create(ctx)
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
local function start_numeric_selection(action_id, timeout_ms)
|
local function build_modifier_prefixes(modifiers)
|
||||||
|
local prefixes = { "" }
|
||||||
|
if type(modifiers) ~= "table" then
|
||||||
|
return prefixes
|
||||||
|
end
|
||||||
|
|
||||||
|
for _, modifier in ipairs(modifiers) do
|
||||||
|
local mapped = MODIFIER_MAP[modifier]
|
||||||
|
if mapped then
|
||||||
|
local existing_count = #prefixes
|
||||||
|
for index = 1, existing_count do
|
||||||
|
prefixes[#prefixes + 1] = prefixes[index] .. mapped .. "+"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
return prefixes
|
||||||
|
end
|
||||||
|
|
||||||
|
local function start_numeric_selection(action_id, timeout_ms, starter_modifiers)
|
||||||
clear_numeric_selection(false)
|
clear_numeric_selection(false)
|
||||||
|
local modifier_prefixes = build_modifier_prefixes(starter_modifiers)
|
||||||
for digit = 1, 9 do
|
for digit = 1, 9 do
|
||||||
local digit_string = tostring(digit)
|
local digit_string = tostring(digit)
|
||||||
local name = "subminer-session-digit-" .. digit_string
|
for _, prefix in ipairs(modifier_prefixes) do
|
||||||
state.session_numeric_binding_names[#state.session_numeric_binding_names + 1] = name
|
local key_name = prefix .. digit_string
|
||||||
mp.add_forced_key_binding(digit_string, name, function()
|
local modifier_name = prefix:gsub("[^%w]", "-")
|
||||||
clear_numeric_selection(false)
|
local name = "subminer-session-digit-" .. modifier_name .. digit_string
|
||||||
invoke_cli_action(action_id, { count = digit })
|
state.session_numeric_binding_names[#state.session_numeric_binding_names + 1] = name
|
||||||
end)
|
mp.add_forced_key_binding(key_name, name, function()
|
||||||
|
clear_numeric_selection(false)
|
||||||
|
invoke_cli_action(action_id, { count = digit })
|
||||||
|
end)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
state.session_numeric_binding_names[#state.session_numeric_binding_names + 1] =
|
state.session_numeric_binding_names[#state.session_numeric_binding_names + 1] =
|
||||||
@@ -272,7 +309,7 @@ function M.create(ctx)
|
|||||||
end
|
end
|
||||||
|
|
||||||
if binding.actionId == "copySubtitleMultiple" or binding.actionId == "mineSentenceMultiple" then
|
if binding.actionId == "copySubtitleMultiple" or binding.actionId == "mineSentenceMultiple" then
|
||||||
start_numeric_selection(binding.actionId, numeric_selection_timeout_ms)
|
start_numeric_selection(binding.actionId, numeric_selection_timeout_ms, binding.key.modifiers)
|
||||||
return
|
return
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,175 @@
|
|||||||
|
package.path = "plugin/subminer/?.lua;" .. package.path
|
||||||
|
|
||||||
|
local session_bindings = require("session_bindings")
|
||||||
|
|
||||||
|
local function assert_true(condition, message)
|
||||||
|
if condition then
|
||||||
|
return
|
||||||
|
end
|
||||||
|
error(message)
|
||||||
|
end
|
||||||
|
|
||||||
|
local artifact_path = ".tmp/test-plugin-session-bindings.json"
|
||||||
|
local is_windows = package.config:sub(1, 1) == "\\"
|
||||||
|
local mkdir_cmd = is_windows and "mkdir .tmp >NUL 2>NUL" or "mkdir -p .tmp"
|
||||||
|
os.execute(mkdir_cmd)
|
||||||
|
local handle = assert(io.open(artifact_path, "w"))
|
||||||
|
handle:write("__SESSION_BINDINGS__")
|
||||||
|
handle:close()
|
||||||
|
|
||||||
|
local recorded = {
|
||||||
|
bindings = {},
|
||||||
|
removed = {},
|
||||||
|
async_calls = {},
|
||||||
|
osd = {},
|
||||||
|
}
|
||||||
|
|
||||||
|
local mp = {}
|
||||||
|
|
||||||
|
function mp.add_forced_key_binding(keys, name, fn)
|
||||||
|
recorded.bindings[#recorded.bindings + 1] = {
|
||||||
|
keys = keys,
|
||||||
|
name = name,
|
||||||
|
fn = fn,
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
function mp.remove_key_binding(name)
|
||||||
|
recorded.removed[#recorded.removed + 1] = name
|
||||||
|
end
|
||||||
|
|
||||||
|
function mp.add_timeout(seconds, callback)
|
||||||
|
return {
|
||||||
|
seconds = seconds,
|
||||||
|
callback = callback,
|
||||||
|
killed = false,
|
||||||
|
kill = function(self)
|
||||||
|
self.killed = true
|
||||||
|
end,
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
function mp.osd_message(message)
|
||||||
|
recorded.osd[#recorded.osd + 1] = message
|
||||||
|
end
|
||||||
|
|
||||||
|
local ctx = {
|
||||||
|
mp = mp,
|
||||||
|
utils = {
|
||||||
|
parse_json = function(raw)
|
||||||
|
if raw ~= "__SESSION_BINDINGS__" then
|
||||||
|
return nil, "unexpected artifact"
|
||||||
|
end
|
||||||
|
return {
|
||||||
|
numericSelectionTimeoutMs = 3000,
|
||||||
|
bindings = {
|
||||||
|
{
|
||||||
|
key = {
|
||||||
|
code = "KeyS",
|
||||||
|
modifiers = { "ctrl", "shift" },
|
||||||
|
},
|
||||||
|
actionType = "session-action",
|
||||||
|
actionId = "mineSentenceMultiple",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
key = {
|
||||||
|
code = "KeyL",
|
||||||
|
modifiers = { "ctrl", "shift" },
|
||||||
|
},
|
||||||
|
actionType = "session-action",
|
||||||
|
actionId = "playNextSubtitle",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
key = {
|
||||||
|
code = "KeyL",
|
||||||
|
modifiers = { "shift" },
|
||||||
|
},
|
||||||
|
actionType = "mpv-command",
|
||||||
|
command = { "sub-seek", 1 },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
end,
|
||||||
|
},
|
||||||
|
state = {
|
||||||
|
binary_path = "/tmp/subminer",
|
||||||
|
session_binding_names = {},
|
||||||
|
session_numeric_binding_names = {},
|
||||||
|
session_numeric_selection = nil,
|
||||||
|
},
|
||||||
|
process = {
|
||||||
|
check_binary_available = function()
|
||||||
|
return true
|
||||||
|
end,
|
||||||
|
run_binary_command_async = function(args)
|
||||||
|
recorded.async_calls[#recorded.async_calls + 1] = args
|
||||||
|
end,
|
||||||
|
},
|
||||||
|
environment = {
|
||||||
|
resolve_session_bindings_artifact_path = function()
|
||||||
|
return artifact_path
|
||||||
|
end,
|
||||||
|
},
|
||||||
|
log = {
|
||||||
|
subminer_log = function() end,
|
||||||
|
show_osd = function(message)
|
||||||
|
recorded.osd[#recorded.osd + 1] = message
|
||||||
|
end,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
local bindings = session_bindings.create(ctx)
|
||||||
|
assert_true(bindings.register_bindings(), "session bindings should register")
|
||||||
|
|
||||||
|
local starter = nil
|
||||||
|
for _, binding in ipairs(recorded.bindings) do
|
||||||
|
if binding.keys == "Ctrl+S" then
|
||||||
|
starter = binding
|
||||||
|
break
|
||||||
|
end
|
||||||
|
end
|
||||||
|
assert_true(starter ~= nil, "multi-mine starter binding should be registered")
|
||||||
|
|
||||||
|
local play_next = nil
|
||||||
|
for _, binding in ipairs(recorded.bindings) do
|
||||||
|
if binding.keys == "Ctrl+L" then
|
||||||
|
play_next = binding
|
||||||
|
break
|
||||||
|
end
|
||||||
|
end
|
||||||
|
assert_true(play_next ~= nil, "play-next subtitle binding should use mpv shifted-letter form")
|
||||||
|
|
||||||
|
local subtitle_jump = nil
|
||||||
|
for _, binding in ipairs(recorded.bindings) do
|
||||||
|
if binding.keys == "L" then
|
||||||
|
subtitle_jump = binding
|
||||||
|
break
|
||||||
|
end
|
||||||
|
end
|
||||||
|
assert_true(subtitle_jump ~= nil, "shifted subtitle jump binding should use mpv uppercase letter form")
|
||||||
|
|
||||||
|
play_next.fn()
|
||||||
|
local play_next_call = recorded.async_calls[#recorded.async_calls]
|
||||||
|
assert_true(play_next_call ~= nil, "play-next binding should invoke CLI action")
|
||||||
|
assert_true(play_next_call[2] == "--play-next-subtitle", "play-next binding should pass CLI flag")
|
||||||
|
|
||||||
|
starter.fn()
|
||||||
|
|
||||||
|
local modified_digit = nil
|
||||||
|
for _, binding in ipairs(recorded.bindings) do
|
||||||
|
if binding.keys == "Ctrl+Shift+3" then
|
||||||
|
modified_digit = binding
|
||||||
|
break
|
||||||
|
end
|
||||||
|
end
|
||||||
|
assert_true(modified_digit ~= nil, "numeric selection should bind Ctrl+Shift+3")
|
||||||
|
|
||||||
|
modified_digit.fn()
|
||||||
|
|
||||||
|
local call = recorded.async_calls[#recorded.async_calls]
|
||||||
|
assert_true(call ~= nil, "modified digit should invoke CLI action")
|
||||||
|
assert_true(call[1] == "/tmp/subminer", "CLI action should use configured binary")
|
||||||
|
assert_true(call[2] == "--mine-sentence-count", "CLI action should mine sentence count")
|
||||||
|
assert_true(call[3] == "3", "CLI action should pass selected count")
|
||||||
|
|
||||||
|
print("plugin session binding regression tests: OK")
|
||||||
@@ -559,6 +559,49 @@ do
|
|||||||
)
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
do
|
||||||
|
local recorded, err = run_plugin_scenario({
|
||||||
|
process_list = "",
|
||||||
|
option_overrides = {
|
||||||
|
binary_path = binary_path,
|
||||||
|
auto_start = "yes",
|
||||||
|
auto_start_visible_overlay = "yes",
|
||||||
|
auto_start_pause_until_ready = "yes",
|
||||||
|
socket_path = "/tmp/subminer-socket",
|
||||||
|
},
|
||||||
|
input_ipc_server = "/tmp/subminer-socket",
|
||||||
|
media_title = "Random Movie",
|
||||||
|
files = {
|
||||||
|
[binary_path] = true,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
assert_true(recorded ~= nil, "plugin failed to load for pre-ready duplicate auto-start scenario: " .. tostring(err))
|
||||||
|
fire_event(recorded, "file-loaded")
|
||||||
|
fire_event(recorded, "file-loaded")
|
||||||
|
assert_true(recorded.script_messages["subminer-autoplay-ready"] ~= nil, "subminer-autoplay-ready script message not registered")
|
||||||
|
assert_true(
|
||||||
|
count_start_calls(recorded.async_calls) == 1,
|
||||||
|
"pre-ready duplicate auto-start should not issue duplicate --start commands"
|
||||||
|
)
|
||||||
|
assert_true(
|
||||||
|
count_property_set(recorded.property_sets, "pause", true) == 1,
|
||||||
|
"pre-ready duplicate auto-start should not repeat the pause gate"
|
||||||
|
)
|
||||||
|
assert_true(
|
||||||
|
count_property_set(recorded.property_sets, "pause", false) == 0,
|
||||||
|
"pre-ready duplicate auto-start should not resume playback before tokenization is ready"
|
||||||
|
)
|
||||||
|
assert_true(
|
||||||
|
count_osd_message(recorded.osd, "SubMiner: Loading subtitle tokenization...") == 1,
|
||||||
|
"pre-ready duplicate auto-start should not repeat the loading OSD"
|
||||||
|
)
|
||||||
|
recorded.script_messages["subminer-autoplay-ready"]()
|
||||||
|
assert_true(
|
||||||
|
count_property_set(recorded.property_sets, "pause", false) == 1,
|
||||||
|
"autoplay-ready should resume the original pre-ready gate"
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
do
|
do
|
||||||
local recorded, err = run_plugin_scenario({
|
local recorded, err = run_plugin_scenario({
|
||||||
process_list = "",
|
process_list = "",
|
||||||
@@ -906,23 +949,23 @@ do
|
|||||||
)
|
)
|
||||||
assert_true(
|
assert_true(
|
||||||
count_control_calls(recorded.async_calls, "--show-visible-overlay") == 4,
|
count_control_calls(recorded.async_calls, "--show-visible-overlay") == 4,
|
||||||
"duplicate pause-until-ready auto-start should re-assert visible overlay on both start and ready events"
|
"duplicate pause-until-ready auto-start should re-assert visible overlay on initial start, ready, and later file load"
|
||||||
)
|
)
|
||||||
assert_true(
|
assert_true(
|
||||||
count_osd_message(recorded.osd, "SubMiner: Loading subtitle tokenization...") == 2,
|
count_osd_message(recorded.osd, "SubMiner: Loading subtitle tokenization...") == 1,
|
||||||
"duplicate pause-until-ready auto-start should arm tokenization loading gate for each file"
|
"duplicate pause-until-ready auto-start should not repeat tokenization loading gate after overlay is running"
|
||||||
)
|
)
|
||||||
assert_true(
|
assert_true(
|
||||||
count_osd_message(recorded.osd, "SubMiner: Subtitle tokenization ready") == 2,
|
count_osd_message(recorded.osd, "SubMiner: Subtitle tokenization ready") == 1,
|
||||||
"duplicate pause-until-ready auto-start should release tokenization gate for each file"
|
"duplicate pause-until-ready auto-start should not wait for a second readiness signal after overlay is running"
|
||||||
)
|
)
|
||||||
assert_true(
|
assert_true(
|
||||||
count_property_set(recorded.property_sets, "pause", true) == 2,
|
count_property_set(recorded.property_sets, "pause", true) == 1,
|
||||||
"duplicate pause-until-ready auto-start should force pause for each file"
|
"duplicate pause-until-ready auto-start should not force pause after overlay is running"
|
||||||
)
|
)
|
||||||
assert_true(
|
assert_true(
|
||||||
count_property_set(recorded.property_sets, "pause", false) == 2,
|
count_property_set(recorded.property_sets, "pause", false) == 1,
|
||||||
"duplicate pause-until-ready auto-start should resume playback for each file"
|
"duplicate pause-until-ready auto-start should not resume a gate that was never rearmed"
|
||||||
)
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|||||||
@@ -177,6 +177,44 @@ test('AnkiIntegration.refreshKnownWordCache skips work when highlight mode is di
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('AnkiIntegration notifies when mined note info updates known words', () => {
|
||||||
|
const ctx = createIntegrationTestContext({
|
||||||
|
stateDirPrefix: 'subminer-anki-integration-known-update-',
|
||||||
|
});
|
||||||
|
let notifications = 0;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const integrationState = ctx.integration as unknown as {
|
||||||
|
config: AnkiConnectConfig;
|
||||||
|
appendKnownWordsFromNoteInfo: (noteInfo: {
|
||||||
|
noteId: number;
|
||||||
|
fields: Record<string, { value: string }>;
|
||||||
|
}) => void;
|
||||||
|
};
|
||||||
|
integrationState.config.deck = 'Mining';
|
||||||
|
integrationState.config.knownWords = {
|
||||||
|
...integrationState.config.knownWords,
|
||||||
|
decks: {
|
||||||
|
Mining: ['Word'],
|
||||||
|
},
|
||||||
|
};
|
||||||
|
ctx.integration.setKnownWordCacheUpdatedCallback(() => {
|
||||||
|
notifications += 1;
|
||||||
|
});
|
||||||
|
integrationState.appendKnownWordsFromNoteInfo({
|
||||||
|
noteId: 42,
|
||||||
|
fields: {
|
||||||
|
Word: { value: '食べる' },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(ctx.integration.isKnownWord('食べる'), true);
|
||||||
|
assert.equal(notifications, 1);
|
||||||
|
} finally {
|
||||||
|
cleanupIntegrationTestContext(ctx);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
test('AnkiIntegration.refreshKnownWordCache deduplicates concurrent refreshes', async () => {
|
test('AnkiIntegration.refreshKnownWordCache deduplicates concurrent refreshes', async () => {
|
||||||
let releaseFindNotes: (() => void) | undefined;
|
let releaseFindNotes: (() => void) | undefined;
|
||||||
const findNotesPromise = new Promise<void>((resolve) => {
|
const findNotesPromise = new Promise<void>((resolve) => {
|
||||||
|
|||||||
+21
-1
@@ -148,6 +148,7 @@ export class AnkiIntegration {
|
|||||||
private runtime: AnkiIntegrationRuntime;
|
private runtime: AnkiIntegrationRuntime;
|
||||||
private aiConfig: AiConfig;
|
private aiConfig: AiConfig;
|
||||||
private recordCardsMinedCallback: ((count: number, noteIds?: number[]) => void) | null = null;
|
private recordCardsMinedCallback: ((count: number, noteIds?: number[]) => void) | null = null;
|
||||||
|
private knownWordCacheUpdatedCallback: (() => void) | null = null;
|
||||||
private noteIdRedirects = new Map<number, number>();
|
private noteIdRedirects = new Map<number, number>();
|
||||||
private trackedDuplicateNoteIds = new Map<number, number[]>();
|
private trackedDuplicateNoteIds = new Map<number, number[]>();
|
||||||
|
|
||||||
@@ -552,10 +553,25 @@ export class AnkiIntegration {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.knownWordCache.appendFromNoteInfo({
|
const changed = this.knownWordCache.appendFromNoteInfo({
|
||||||
noteId: noteInfo.noteId,
|
noteId: noteInfo.noteId,
|
||||||
fields: noteInfo.fields,
|
fields: noteInfo.fields,
|
||||||
});
|
});
|
||||||
|
if (changed) {
|
||||||
|
this.notifyKnownWordCacheUpdated();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private notifyKnownWordCacheUpdated(): void {
|
||||||
|
if (!this.knownWordCacheUpdatedCallback) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
this.knownWordCacheUpdatedCallback();
|
||||||
|
} catch (error) {
|
||||||
|
log.warn('Known-word cache update callback failed:', (error as Error).message);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private getLapisConfig(): {
|
private getLapisConfig(): {
|
||||||
@@ -1267,6 +1283,10 @@ export class AnkiIntegration {
|
|||||||
this.recordCardsMinedCallback = callback;
|
this.recordCardsMinedCallback = callback;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setKnownWordCacheUpdatedCallback(callback: (() => void) | null): void {
|
||||||
|
this.knownWordCacheUpdatedCallback = callback;
|
||||||
|
}
|
||||||
|
|
||||||
resolveCurrentNoteId(noteId: number): number {
|
resolveCurrentNoteId(noteId: number): number {
|
||||||
let resolved = noteId;
|
let resolved = noteId;
|
||||||
const seen = new Set<number>();
|
const seen = new Set<number>();
|
||||||
|
|||||||
@@ -126,7 +126,7 @@ function createManualUpdateService(overrides: Partial<CardCreationDeps> = {}): {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
test('manual clipboard subtitle update replaces expression and sentence audio even when overwriteAudio is disabled', async () => {
|
test('manual clipboard subtitle update replaces sentence audio without touching expression audio', async () => {
|
||||||
const { service, updatedFields, mergeCalls, storedMedia } = createManualUpdateService();
|
const { service, updatedFields, mergeCalls, storedMedia } = createManualUpdateService();
|
||||||
|
|
||||||
await service.updateLastAddedFromClipboard('字幕');
|
await service.updateLastAddedFromClipboard('字幕');
|
||||||
@@ -134,10 +134,44 @@ test('manual clipboard subtitle update replaces expression and sentence audio ev
|
|||||||
assert.equal(updatedFields.length, 1);
|
assert.equal(updatedFields.length, 1);
|
||||||
assert.equal(storedMedia.length, 1);
|
assert.equal(storedMedia.length, 1);
|
||||||
const audioValue = `[sound:${storedMedia[0]}]`;
|
const audioValue = `[sound:${storedMedia[0]}]`;
|
||||||
assert.equal(updatedFields[0]?.ExpressionAudio, audioValue);
|
|
||||||
assert.equal(updatedFields[0]?.SentenceAudio, audioValue);
|
assert.equal(updatedFields[0]?.SentenceAudio, audioValue);
|
||||||
|
assert.equal('ExpressionAudio' in updatedFields[0]!, false);
|
||||||
assert.deepEqual(
|
assert.deepEqual(
|
||||||
mergeCalls.map((call) => call.overwrite),
|
mergeCalls.map((call) => call.overwrite),
|
||||||
[true, true],
|
[true],
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('manual clipboard subtitle update skips audio when sentence audio field is missing', async () => {
|
||||||
|
const { service, updatedFields, mergeCalls, storedMedia } = createManualUpdateService({
|
||||||
|
client: {
|
||||||
|
addNote: async () => 0,
|
||||||
|
addTags: async () => undefined,
|
||||||
|
notesInfo: async () => [
|
||||||
|
{
|
||||||
|
noteId: 42,
|
||||||
|
fields: {
|
||||||
|
Expression: { value: '単語' },
|
||||||
|
Sentence: { value: '' },
|
||||||
|
ExpressionAudio: { value: '[sound:auto-expression.mp3]' },
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
updateNoteFields: async (_noteId, fields) => {
|
||||||
|
updatedFields.push(fields);
|
||||||
|
},
|
||||||
|
storeMediaFile: async (filename) => {
|
||||||
|
storedMedia.push(filename);
|
||||||
|
},
|
||||||
|
findNotes: async () => [42],
|
||||||
|
retrieveMediaFile: async () => '',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
await service.updateLastAddedFromClipboard('字幕');
|
||||||
|
|
||||||
|
assert.equal(storedMedia.length, 1);
|
||||||
|
assert.equal(updatedFields.length, 1);
|
||||||
|
assert.deepEqual(updatedFields[0], { Sentence: '字幕' });
|
||||||
|
assert.equal(mergeCalls.length, 0);
|
||||||
|
});
|
||||||
|
|||||||
@@ -218,11 +218,7 @@ export class CardCreationService {
|
|||||||
fields,
|
fields,
|
||||||
this.deps.getConfig(),
|
this.deps.getConfig(),
|
||||||
);
|
);
|
||||||
const sentenceAudioField = this.getResolvedSentenceAudioFieldName(noteInfo);
|
const sentenceAudioField = this.getResolvedSentenceOnlyAudioFieldName(noteInfo);
|
||||||
const expressionAudioField = this.deps.resolveConfiguredFieldName(
|
|
||||||
noteInfo,
|
|
||||||
this.deps.getConfig().fields?.audio || 'ExpressionAudio',
|
|
||||||
);
|
|
||||||
const sentenceField = this.deps.getEffectiveSentenceCardConfig().sentenceField;
|
const sentenceField = this.deps.getEffectiveSentenceCardConfig().sentenceField;
|
||||||
|
|
||||||
const sentence = blocks.join(' ');
|
const sentence = blocks.join(' ');
|
||||||
@@ -252,22 +248,15 @@ export class CardCreationService {
|
|||||||
|
|
||||||
if (audioBuffer) {
|
if (audioBuffer) {
|
||||||
await this.deps.client.storeMediaFile(audioFilename, audioBuffer);
|
await this.deps.client.storeMediaFile(audioFilename, audioBuffer);
|
||||||
if (sentenceAudioField || expressionAudioField) {
|
if (sentenceAudioField) {
|
||||||
const audioValue = `[sound:${audioFilename}]`;
|
const audioValue = `[sound:${audioFilename}]`;
|
||||||
const audioFields = new Set(
|
const existingAudio = noteInfo.fields[sentenceAudioField]?.value || '';
|
||||||
[sentenceAudioField, expressionAudioField].filter(
|
// Manual clipboard updates intentionally replace old captured sentence audio.
|
||||||
(fieldName): fieldName is string => Boolean(fieldName),
|
updatedFields[sentenceAudioField] = this.deps.mergeFieldValue(
|
||||||
),
|
existingAudio,
|
||||||
|
audioValue,
|
||||||
|
true,
|
||||||
);
|
);
|
||||||
for (const audioField of audioFields) {
|
|
||||||
const existingAudio = noteInfo.fields[audioField]?.value || '';
|
|
||||||
// Manual clipboard updates intentionally replace old captured audio.
|
|
||||||
updatedFields[audioField] = this.deps.mergeFieldValue(
|
|
||||||
existingAudio,
|
|
||||||
audioValue,
|
|
||||||
true,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
miscInfoFilename = audioFilename;
|
miscInfoFilename = audioFilename;
|
||||||
updatePerformed = true;
|
updatePerformed = true;
|
||||||
@@ -732,6 +721,13 @@ export class CardCreationService {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private getResolvedSentenceOnlyAudioFieldName(noteInfo: CardCreationNoteInfo): string | null {
|
||||||
|
return this.deps.resolveNoteFieldName(
|
||||||
|
noteInfo,
|
||||||
|
this.deps.getEffectiveSentenceCardConfig().audioField || 'SentenceAudio',
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
private createPendingNoteInfo(fields: Record<string, string>): CardCreationNoteInfo {
|
private createPendingNoteInfo(fields: Record<string, string>): CardCreationNoteInfo {
|
||||||
return {
|
return {
|
||||||
noteId: -1,
|
noteId: -1,
|
||||||
|
|||||||
@@ -520,6 +520,51 @@ test('KnownWordCacheManager uses the current deck fields for immediate append',
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('KnownWordCacheManager reports immediate append cache clears as mutations', () => {
|
||||||
|
const config: AnkiConnectConfig = {
|
||||||
|
fields: {
|
||||||
|
word: 'Expression',
|
||||||
|
},
|
||||||
|
knownWords: {
|
||||||
|
highlightEnabled: true,
|
||||||
|
refreshMinutes: 60,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
const { manager, statePath, cleanup } = createKnownWordCacheHarness(config);
|
||||||
|
|
||||||
|
try {
|
||||||
|
fs.writeFileSync(
|
||||||
|
statePath,
|
||||||
|
JSON.stringify({
|
||||||
|
version: 2,
|
||||||
|
refreshedAtMs: Date.now(),
|
||||||
|
scope: '{"refreshMinutes":60,"scope":"is:note","fieldsWord":"Expression"}',
|
||||||
|
words: ['猫'],
|
||||||
|
notes: {
|
||||||
|
'1': ['猫'],
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
'utf-8',
|
||||||
|
);
|
||||||
|
manager.startLifecycle();
|
||||||
|
assert.equal(manager.isKnownWord('猫'), true);
|
||||||
|
|
||||||
|
config.fields = { word: 'Word' };
|
||||||
|
const changed = manager.appendFromNoteInfo({
|
||||||
|
noteId: 2,
|
||||||
|
fields: {
|
||||||
|
Word: { value: '' },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(changed, true);
|
||||||
|
assert.equal(manager.isKnownWord('猫'), false);
|
||||||
|
} finally {
|
||||||
|
manager.stopLifecycle();
|
||||||
|
cleanup();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
test('KnownWordCacheManager skips immediate append when addMinedWordsImmediately is disabled', () => {
|
test('KnownWordCacheManager skips immediate append when addMinedWordsImmediately is disabled', () => {
|
||||||
const config: AnkiConnectConfig = {
|
const config: AnkiConnectConfig = {
|
||||||
knownWords: {
|
knownWords: {
|
||||||
|
|||||||
@@ -165,13 +165,15 @@ export class KnownWordCacheManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
appendFromNoteInfo(noteInfo: KnownWordCacheNoteInfo): void {
|
appendFromNoteInfo(noteInfo: KnownWordCacheNoteInfo): boolean {
|
||||||
if (!this.isKnownWordCacheEnabled() || !this.shouldAddMinedWordsImmediately()) {
|
if (!this.isKnownWordCacheEnabled() || !this.shouldAddMinedWordsImmediately()) {
|
||||||
return;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let didMutateCache = false;
|
||||||
const currentStateKey = this.getKnownWordCacheStateKey();
|
const currentStateKey = this.getKnownWordCacheStateKey();
|
||||||
if (this.knownWordsStateKey && this.knownWordsStateKey !== currentStateKey) {
|
if (this.knownWordsStateKey && this.knownWordsStateKey !== currentStateKey) {
|
||||||
|
didMutateCache = this.knownWords.size > 0 || this.noteWordsById.size > 0;
|
||||||
this.clearKnownWordCacheState();
|
this.clearKnownWordCacheState();
|
||||||
}
|
}
|
||||||
if (!this.knownWordsStateKey) {
|
if (!this.knownWordsStateKey) {
|
||||||
@@ -180,13 +182,13 @@ export class KnownWordCacheManager {
|
|||||||
|
|
||||||
const preferredFields = this.getImmediateAppendFields();
|
const preferredFields = this.getImmediateAppendFields();
|
||||||
if (!preferredFields) {
|
if (!preferredFields) {
|
||||||
return;
|
return didMutateCache;
|
||||||
}
|
}
|
||||||
|
|
||||||
const nextWords = this.extractNormalizedKnownWordsFromNoteInfo(noteInfo, preferredFields);
|
const nextWords = this.extractNormalizedKnownWordsFromNoteInfo(noteInfo, preferredFields);
|
||||||
const changed = this.replaceNoteSnapshot(noteInfo.noteId, nextWords);
|
const changed = this.replaceNoteSnapshot(noteInfo.noteId, nextWords);
|
||||||
if (!changed) {
|
if (!changed) {
|
||||||
return;
|
return didMutateCache;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (this.knownWordsLastRefreshedAtMs <= 0) {
|
if (this.knownWordsLastRefreshedAtMs <= 0) {
|
||||||
@@ -199,6 +201,7 @@ export class KnownWordCacheManager {
|
|||||||
`wordCount=${nextWords.length}`,
|
`wordCount=${nextWords.length}`,
|
||||||
`scope=${getKnownWordCacheScopeForConfig(this.deps.getConfig())}`,
|
`scope=${getKnownWordCacheScopeForConfig(this.deps.getConfig())}`,
|
||||||
);
|
);
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
clearKnownWordCacheState(): void {
|
clearKnownWordCacheState(): void {
|
||||||
|
|||||||
@@ -89,7 +89,7 @@ export const CORE_DEFAULT_CONFIG: Pick<
|
|||||||
openCharacterDictionary: 'CommandOrControl+Alt+A',
|
openCharacterDictionary: 'CommandOrControl+Alt+A',
|
||||||
openRuntimeOptions: 'CommandOrControl+Shift+O',
|
openRuntimeOptions: 'CommandOrControl+Shift+O',
|
||||||
openJimaku: 'Ctrl+Shift+J',
|
openJimaku: 'Ctrl+Shift+J',
|
||||||
openSessionHelp: 'CommandOrControl+Shift+H',
|
openSessionHelp: 'CommandOrControl+Slash',
|
||||||
openControllerSelect: 'Alt+C',
|
openControllerSelect: 'Alt+C',
|
||||||
openControllerDebug: 'Alt+Shift+C',
|
openControllerDebug: 'Alt+Shift+C',
|
||||||
toggleSubtitleSidebar: 'Backslash',
|
toggleSubtitleSidebar: 'Backslash',
|
||||||
|
|||||||
@@ -92,3 +92,11 @@ test('default keybindings include fullscreen on F', () => {
|
|||||||
);
|
);
|
||||||
assert.deepEqual(keybindingMap.get('KeyF'), ['cycle', 'fullscreen']);
|
assert.deepEqual(keybindingMap.get('KeyF'), ['cycle', 'fullscreen']);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('default keybindings include replay and next subtitle controls', () => {
|
||||||
|
const keybindingMap = new Map(
|
||||||
|
DEFAULT_KEYBINDINGS.map((binding) => [binding.key, binding.command]),
|
||||||
|
);
|
||||||
|
assert.deepEqual(keybindingMap.get('Ctrl+Shift+KeyH'), ['__replay-subtitle']);
|
||||||
|
assert.deepEqual(keybindingMap.get('Ctrl+Shift+KeyL'), ['__play-next-subtitle']);
|
||||||
|
});
|
||||||
|
|||||||
@@ -277,6 +277,8 @@ function createMockTracker(
|
|||||||
getSessionTimeline: async () => [],
|
getSessionTimeline: async () => [],
|
||||||
getSessionEvents: async () => [],
|
getSessionEvents: async () => [],
|
||||||
getVocabularyStats: async () => VOCABULARY_STATS,
|
getVocabularyStats: async () => VOCABULARY_STATS,
|
||||||
|
getStatsExcludedWords: async () => [],
|
||||||
|
replaceStatsExcludedWords: async () => {},
|
||||||
getKanjiStats: async () => KANJI_STATS,
|
getKanjiStats: async () => KANJI_STATS,
|
||||||
getWordOccurrences: async () => OCCURRENCES,
|
getWordOccurrences: async () => OCCURRENCES,
|
||||||
getKanjiOccurrences: async () => OCCURRENCES,
|
getKanjiOccurrences: async () => OCCURRENCES,
|
||||||
@@ -362,7 +364,7 @@ describe('stats server API routes', () => {
|
|||||||
assert.ok(Array.isArray(body));
|
assert.ok(Array.isArray(body));
|
||||||
});
|
});
|
||||||
|
|
||||||
it('GET /api/stats/sessions enriches each session with known-word metrics when cache exists', async () => {
|
it('GET /api/stats/sessions enriches known-word metrics using filtered persisted totals', async () => {
|
||||||
await withTempDir(async (dir) => {
|
await withTempDir(async (dir) => {
|
||||||
const cachePath = path.join(dir, 'known-words.json');
|
const cachePath = path.join(dir, 'known-words.json');
|
||||||
fs.writeFileSync(
|
fs.writeFileSync(
|
||||||
@@ -391,7 +393,7 @@ describe('stats server API routes', () => {
|
|||||||
const body = await res.json();
|
const body = await res.json();
|
||||||
const first = body[0];
|
const first = body[0];
|
||||||
assert.equal(first.knownWordsSeen, 2);
|
assert.equal(first.knownWordsSeen, 2);
|
||||||
assert.equal(first.knownWordRate, 2.5);
|
assert.equal(first.knownWordRate, 66.7);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -436,7 +438,7 @@ describe('stats server API routes', () => {
|
|||||||
assert.equal(seenLimit, undefined);
|
assert.equal(seenLimit, undefined);
|
||||||
});
|
});
|
||||||
|
|
||||||
it('GET /api/stats/sessions/:id/known-words-timeline preserves line positions and counts known occurrences', async () => {
|
it('GET /api/stats/sessions/:id/known-words-timeline preserves line positions and counts filtered totals', async () => {
|
||||||
await withTempDir(async (dir) => {
|
await withTempDir(async (dir) => {
|
||||||
const cachePath = path.join(dir, 'known-words.json');
|
const cachePath = path.join(dir, 'known-words.json');
|
||||||
fs.writeFileSync(
|
fs.writeFileSync(
|
||||||
@@ -461,8 +463,10 @@ describe('stats server API routes', () => {
|
|||||||
const res = await app.request('/api/stats/sessions/1/known-words-timeline');
|
const res = await app.request('/api/stats/sessions/1/known-words-timeline');
|
||||||
assert.equal(res.status, 200);
|
assert.equal(res.status, 200);
|
||||||
assert.deepEqual(await res.json(), [
|
assert.deepEqual(await res.json(), [
|
||||||
{ linesSeen: 1, knownWordsSeen: 2 },
|
{ linesSeen: 0, knownWordsSeen: 0, totalWordsSeen: 0 },
|
||||||
{ linesSeen: 3, knownWordsSeen: 3 },
|
{ linesSeen: 1, knownWordsSeen: 2, totalWordsSeen: 2 },
|
||||||
|
{ linesSeen: 2, knownWordsSeen: 2, totalWordsSeen: 2 },
|
||||||
|
{ linesSeen: 3, knownWordsSeen: 3, totalWordsSeen: 7 },
|
||||||
]);
|
]);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
@@ -730,6 +734,65 @@ describe('stats server API routes', () => {
|
|||||||
assert.equal(body[0].pos3, null);
|
assert.equal(body[0].pos3, null);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it('GET /api/stats/excluded-words returns tracker exclusion rows', async () => {
|
||||||
|
const app = createStatsApp(
|
||||||
|
createMockTracker({
|
||||||
|
getStatsExcludedWords: async () => [
|
||||||
|
{ headword: '猫', word: '猫', reading: 'ねこ' },
|
||||||
|
{ headword: 'する', word: 'する', reading: 'する' },
|
||||||
|
],
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
const res = await app.request('/api/stats/excluded-words');
|
||||||
|
assert.equal(res.status, 200);
|
||||||
|
assert.deepEqual(await res.json(), [
|
||||||
|
{ headword: '猫', word: '猫', reading: 'ねこ' },
|
||||||
|
{ headword: 'する', word: 'する', reading: 'する' },
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('PUT /api/stats/excluded-words replaces tracker exclusion rows', async () => {
|
||||||
|
let seenWords: unknown = null;
|
||||||
|
const app = createStatsApp(
|
||||||
|
createMockTracker({
|
||||||
|
replaceStatsExcludedWords: async (words: unknown) => {
|
||||||
|
seenWords = words;
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
const res = await app.request('/api/stats/excluded-words', {
|
||||||
|
method: 'PUT',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({
|
||||||
|
words: [
|
||||||
|
{ headword: '猫', word: '猫', reading: 'ねこ' },
|
||||||
|
{ headword: 'する', word: 'する', reading: 'する' },
|
||||||
|
],
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(res.status, 200);
|
||||||
|
assert.deepEqual(await res.json(), { ok: true });
|
||||||
|
assert.deepEqual(seenWords, [
|
||||||
|
{ headword: '猫', word: '猫', reading: 'ねこ' },
|
||||||
|
{ headword: 'する', word: 'する', reading: 'する' },
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('PUT /api/stats/excluded-words rejects malformed rows', async () => {
|
||||||
|
const app = createStatsApp(createMockTracker());
|
||||||
|
|
||||||
|
const res = await app.request('/api/stats/excluded-words', {
|
||||||
|
method: 'PUT',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ words: [{ headword: '猫', word: 7, reading: 'ねこ' }] }),
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(res.status, 400);
|
||||||
|
});
|
||||||
|
|
||||||
it('GET /api/stats/anime returns anime library', async () => {
|
it('GET /api/stats/anime returns anime library', async () => {
|
||||||
const app = createStatsApp(createMockTracker());
|
const app = createStatsApp(createMockTracker());
|
||||||
const res = await app.request('/api/stats/anime');
|
const res = await app.request('/api/stats/anime');
|
||||||
|
|||||||
@@ -38,6 +38,24 @@ function createPassthroughStorage(): SafeStorageLike {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function createTransientUnavailableStorage(): SafeStorageLike & {
|
||||||
|
setAvailable: (next: boolean) => void;
|
||||||
|
} {
|
||||||
|
let available = false;
|
||||||
|
return {
|
||||||
|
isEncryptionAvailable: () => available,
|
||||||
|
encryptString: (value: string) => Buffer.from(`enc:${value}`, 'utf-8'),
|
||||||
|
decryptString: (value: Buffer) => {
|
||||||
|
const raw = value.toString('utf-8');
|
||||||
|
return raw.startsWith('enc:') ? raw.slice(4) : raw;
|
||||||
|
},
|
||||||
|
getSelectedStorageBackend: () => (available ? 'gnome_libsecret' : 'unknown'),
|
||||||
|
setAvailable(next: boolean) {
|
||||||
|
available = next;
|
||||||
|
},
|
||||||
|
} as SafeStorageLike & { setAvailable: (next: boolean) => void };
|
||||||
|
}
|
||||||
|
|
||||||
test('anilist token store saves and loads encrypted token', () => {
|
test('anilist token store saves and loads encrypted token', () => {
|
||||||
const filePath = createTempTokenFile();
|
const filePath = createTempTokenFile();
|
||||||
const store = createAnilistTokenStore(filePath, createLogger(), createStorage(true));
|
const store = createAnilistTokenStore(filePath, createLogger(), createStorage(true));
|
||||||
@@ -61,6 +79,27 @@ test('anilist token store refuses to persist token when encryption unavailable',
|
|||||||
assert.equal(store.loadToken(), null);
|
assert.equal(store.loadToken(), null);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('anilist token store retries safeStorage after transient encryption unavailability', () => {
|
||||||
|
const filePath = createTempTokenFile();
|
||||||
|
fs.writeFileSync(
|
||||||
|
filePath,
|
||||||
|
JSON.stringify({
|
||||||
|
encryptedToken: Buffer.from('enc:stored-token', 'utf-8').toString('base64'),
|
||||||
|
updatedAt: Date.now(),
|
||||||
|
}),
|
||||||
|
'utf-8',
|
||||||
|
);
|
||||||
|
const storage = createTransientUnavailableStorage();
|
||||||
|
const store = createAnilistTokenStore(filePath, createLogger(), storage);
|
||||||
|
|
||||||
|
assert.equal(store.loadToken(), null);
|
||||||
|
storage.setAvailable(true);
|
||||||
|
|
||||||
|
assert.equal(store.loadToken(), 'stored-token');
|
||||||
|
assert.equal(store.saveToken('new-token'), true);
|
||||||
|
assert.equal(store.loadToken(), 'new-token');
|
||||||
|
});
|
||||||
|
|
||||||
test('anilist token store migrates legacy plaintext to encrypted', () => {
|
test('anilist token store migrates legacy plaintext to encrypted', () => {
|
||||||
const filePath = createTempTokenFile();
|
const filePath = createTempTokenFile();
|
||||||
fs.writeFileSync(
|
fs.writeFileSync(
|
||||||
|
|||||||
@@ -69,7 +69,6 @@ export function createAnilistTokenStore(
|
|||||||
`AniList token encryption unavailable: safeStorage.isEncryptionAvailable() is false. ` +
|
`AniList token encryption unavailable: safeStorage.isEncryptionAvailable() is false. ` +
|
||||||
`Context: ${getSafeStorageDebugContext()}`,
|
`Context: ${getSafeStorageDebugContext()}`,
|
||||||
);
|
);
|
||||||
safeStorageUsable = false;
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
const probe = storage.encryptString('__subminer_anilist_probe__');
|
const probe = storage.encryptString('__subminer_anilist_probe__');
|
||||||
@@ -77,7 +76,6 @@ export function createAnilistTokenStore(
|
|||||||
notifyUser(
|
notifyUser(
|
||||||
'AniList token encryption probe failed: safeStorage.encryptString() returned plaintext bytes.',
|
'AniList token encryption probe failed: safeStorage.encryptString() returned plaintext bytes.',
|
||||||
);
|
);
|
||||||
safeStorageUsable = false;
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
const roundTrip = storage.decryptString(probe);
|
const roundTrip = storage.decryptString(probe);
|
||||||
@@ -85,7 +83,6 @@ export function createAnilistTokenStore(
|
|||||||
notifyUser(
|
notifyUser(
|
||||||
'AniList token encryption probe failed: encrypt/decrypt round trip returned unexpected content.',
|
'AniList token encryption probe failed: encrypt/decrypt round trip returned unexpected content.',
|
||||||
);
|
);
|
||||||
safeStorageUsable = false;
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
safeStorageUsable = true;
|
safeStorageUsable = true;
|
||||||
@@ -96,7 +93,6 @@ export function createAnilistTokenStore(
|
|||||||
`AniList token encryption unavailable: safeStorage probe threw an error. ` +
|
`AniList token encryption unavailable: safeStorage probe threw an error. ` +
|
||||||
`Context: ${getSafeStorageDebugContext()}`,
|
`Context: ${getSafeStorageDebugContext()}`,
|
||||||
);
|
);
|
||||||
safeStorageUsable = false;
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -22,6 +22,44 @@ test('guessAnilistMediaInfo uses guessit output when available', async () => {
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test('guessAnilistMediaInfo fills missing guessit episode from filename parser', async () => {
|
||||||
|
const result = await guessAnilistMediaInfo('/tmp/Guessit Title S01E09.mkv', null, {
|
||||||
|
runGuessit: async () => JSON.stringify({ title: 'Guessit Title' }),
|
||||||
|
});
|
||||||
|
assert.deepEqual(result, {
|
||||||
|
title: 'Guessit Title',
|
||||||
|
season: 1,
|
||||||
|
episode: 9,
|
||||||
|
source: 'guessit',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
test('guessAnilistMediaInfo ignores low-confidence parser details when guessit omits them', async () => {
|
||||||
|
const result = await guessAnilistMediaInfo('/tmp/Season 2/Guessit Title.mkv', null, {
|
||||||
|
runGuessit: async () => JSON.stringify({ title: 'Guessit Title' }),
|
||||||
|
});
|
||||||
|
assert.deepEqual(result, {
|
||||||
|
title: 'Guessit Title',
|
||||||
|
season: null,
|
||||||
|
episode: null,
|
||||||
|
source: 'guessit',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
test('guessAnilistMediaInfo parses Little Witch Academia release filename', async () => {
|
||||||
|
const filename =
|
||||||
|
'/tmp/Little Witch Academia (2017) - S01E02 - 002 - Papiliodia [Bluray-1080p][10bit][h265][AC3 2.0][JA].mkv';
|
||||||
|
const result = await guessAnilistMediaInfo(filename, null, {
|
||||||
|
runGuessit: async () => JSON.stringify({ title: 'Little Witch Academia' }),
|
||||||
|
});
|
||||||
|
assert.deepEqual(result, {
|
||||||
|
title: 'Little Witch Academia',
|
||||||
|
season: 1,
|
||||||
|
episode: 2,
|
||||||
|
source: 'guessit',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
test('guessAnilistMediaInfo falls back to parser when guessit fails', async () => {
|
test('guessAnilistMediaInfo falls back to parser when guessit fails', async () => {
|
||||||
const result = await guessAnilistMediaInfo('/tmp/My Anime S01E03.mkv', null, {
|
const result = await guessAnilistMediaInfo('/tmp/My Anime S01E03.mkv', null, {
|
||||||
runGuessit: async () => {
|
runGuessit: async () => {
|
||||||
@@ -54,7 +92,7 @@ test('guessAnilistMediaInfo uses basename for guessit input', async () => {
|
|||||||
]);
|
]);
|
||||||
assert.deepEqual(result, {
|
assert.deepEqual(result, {
|
||||||
title: 'Rascal Does Not Dream of Bunny Girl Senpai',
|
title: 'Rascal Does Not Dream of Bunny Girl Senpai',
|
||||||
season: null,
|
season: 1,
|
||||||
episode: 1,
|
episode: 1,
|
||||||
source: 'guessit',
|
source: 'guessit',
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -236,12 +236,14 @@ export async function guessAnilistMediaInfo(
|
|||||||
const season = firstPositiveInteger(parsed.season);
|
const season = firstPositiveInteger(parsed.season);
|
||||||
const year = firstYear(parsed.year);
|
const year = firstYear(parsed.year);
|
||||||
if (title) {
|
if (title) {
|
||||||
|
const fallback = parseMediaInfo(target);
|
||||||
|
const canUseFallbackDetails = fallback.confidence !== 'low';
|
||||||
return {
|
return {
|
||||||
title: buildGuessitTitle(title, alternativeTitle),
|
title: buildGuessitTitle(title, alternativeTitle),
|
||||||
...(alternativeTitle ? { alternativeTitle } : {}),
|
...(alternativeTitle ? { alternativeTitle } : {}),
|
||||||
...(year ? { year } : {}),
|
...(year ? { year } : {}),
|
||||||
season,
|
season: season ?? (canUseFallbackDetails ? fallback.season : null),
|
||||||
episode,
|
episode: episode ?? (canUseFallbackDetails ? fallback.episode : null),
|
||||||
source: 'guessit',
|
source: 'guessit',
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,200 @@
|
|||||||
|
import assert from 'node:assert/strict';
|
||||||
|
import test from 'node:test';
|
||||||
|
import {
|
||||||
|
buildHyprlandPlacementDispatches,
|
||||||
|
ensureHyprlandWindowFloatingByTitle,
|
||||||
|
findHyprlandWindowForPlacement,
|
||||||
|
shouldAttemptHyprlandWindowPlacement,
|
||||||
|
} from './hyprland-window-placement';
|
||||||
|
|
||||||
|
test('shouldAttemptHyprlandWindowPlacement only enables on Hyprland Linux sessions', () => {
|
||||||
|
assert.equal(
|
||||||
|
shouldAttemptHyprlandWindowPlacement('linux', {
|
||||||
|
HYPRLAND_INSTANCE_SIGNATURE: 'abc',
|
||||||
|
}),
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
assert.equal(
|
||||||
|
shouldAttemptHyprlandWindowPlacement('linux', {
|
||||||
|
WAYLAND_DISPLAY: 'wayland-1',
|
||||||
|
}),
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
assert.equal(
|
||||||
|
shouldAttemptHyprlandWindowPlacement('darwin', {
|
||||||
|
HYPRLAND_INSTANCE_SIGNATURE: 'abc',
|
||||||
|
}),
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('findHyprlandWindowForPlacement matches current process by title', () => {
|
||||||
|
const client = findHyprlandWindowForPlacement(
|
||||||
|
[
|
||||||
|
{
|
||||||
|
address: '0xother',
|
||||||
|
pid: 123,
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
mapped: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
address: '0xmatch',
|
||||||
|
pid: 456,
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
mapped: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
{
|
||||||
|
pid: 456,
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
assert.equal(client?.address, '0xmatch');
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildHyprlandPlacementDispatches floats tiled overlay windows without pinning them', () => {
|
||||||
|
assert.deepEqual(
|
||||||
|
buildHyprlandPlacementDispatches({
|
||||||
|
address: '0xabc',
|
||||||
|
floating: false,
|
||||||
|
pinned: false,
|
||||||
|
}),
|
||||||
|
[['dispatch', 'setfloating', 'address:0xabc']],
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildHyprlandPlacementDispatches force-aligns floating overlay windows to target bounds', () => {
|
||||||
|
assert.deepEqual(
|
||||||
|
buildHyprlandPlacementDispatches(
|
||||||
|
{
|
||||||
|
address: '0xabc',
|
||||||
|
floating: true,
|
||||||
|
pinned: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
x: 0,
|
||||||
|
y: 0,
|
||||||
|
width: 1920,
|
||||||
|
height: 1080,
|
||||||
|
},
|
||||||
|
),
|
||||||
|
[
|
||||||
|
['dispatch', 'movewindowpixel', 'exact 0 0,address:0xabc'],
|
||||||
|
['dispatch', 'resizewindowpixel', 'exact 1920 1080,address:0xabc'],
|
||||||
|
['dispatch', 'setprop', 'address:0xabc rounding 0'],
|
||||||
|
['dispatch', 'setprop', 'address:0xabc border_size 0'],
|
||||||
|
['dispatch', 'setprop', 'address:0xabc no_shadow 1'],
|
||||||
|
['dispatch', 'setprop', 'address:0xabc no_blur 1'],
|
||||||
|
['dispatch', 'setprop', 'address:0xabc decorate 0'],
|
||||||
|
],
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildHyprlandPlacementDispatches does not pin already floating overlay windows', () => {
|
||||||
|
assert.deepEqual(
|
||||||
|
buildHyprlandPlacementDispatches({
|
||||||
|
address: '0xabc',
|
||||||
|
floating: true,
|
||||||
|
pinned: false,
|
||||||
|
}),
|
||||||
|
[],
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('buildHyprlandPlacementDispatches unpins previously pinned overlay windows', () => {
|
||||||
|
assert.deepEqual(
|
||||||
|
buildHyprlandPlacementDispatches({
|
||||||
|
address: '0xabc',
|
||||||
|
floating: true,
|
||||||
|
pinned: true,
|
||||||
|
}),
|
||||||
|
[['dispatch', 'pin', 'address:0xabc']],
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('ensureHyprlandWindowFloatingByTitle dispatches float-only placement for matching tiled window', () => {
|
||||||
|
const calls: unknown[][] = [];
|
||||||
|
const placed = ensureHyprlandWindowFloatingByTitle({
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
platform: 'linux',
|
||||||
|
env: {
|
||||||
|
HYPRLAND_INSTANCE_SIGNATURE: 'abc',
|
||||||
|
},
|
||||||
|
pid: 456,
|
||||||
|
execFileSync: ((command: string, args: string[], options: unknown) => {
|
||||||
|
calls.push([command, args, options]);
|
||||||
|
if (args.join(' ') === '-j clients') {
|
||||||
|
return JSON.stringify([
|
||||||
|
{
|
||||||
|
address: '0xmatch',
|
||||||
|
pid: 456,
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
mapped: true,
|
||||||
|
floating: false,
|
||||||
|
pinned: false,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
return '';
|
||||||
|
}) as never,
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(placed, true);
|
||||||
|
assert.deepEqual(
|
||||||
|
calls.map(([, args]) => args),
|
||||||
|
[
|
||||||
|
['-j', 'clients'],
|
||||||
|
['dispatch', 'setfloating', 'address:0xmatch'],
|
||||||
|
],
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test('ensureHyprlandWindowFloatingByTitle dispatches exact Hyprland geometry when bounds are provided', () => {
|
||||||
|
const calls: unknown[][] = [];
|
||||||
|
const placed = ensureHyprlandWindowFloatingByTitle({
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
platform: 'linux',
|
||||||
|
env: {
|
||||||
|
HYPRLAND_INSTANCE_SIGNATURE: 'abc',
|
||||||
|
},
|
||||||
|
pid: 456,
|
||||||
|
bounds: {
|
||||||
|
x: 0,
|
||||||
|
y: 0,
|
||||||
|
width: 1920,
|
||||||
|
height: 1080,
|
||||||
|
},
|
||||||
|
execFileSync: ((command: string, args: string[], options: unknown) => {
|
||||||
|
calls.push([command, args, options]);
|
||||||
|
if (args.join(' ') === '-j clients') {
|
||||||
|
return JSON.stringify([
|
||||||
|
{
|
||||||
|
address: '0xmatch',
|
||||||
|
pid: 456,
|
||||||
|
title: 'SubMiner Stats',
|
||||||
|
mapped: true,
|
||||||
|
floating: true,
|
||||||
|
pinned: false,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
return '';
|
||||||
|
}) as never,
|
||||||
|
});
|
||||||
|
|
||||||
|
assert.equal(placed, true);
|
||||||
|
assert.deepEqual(
|
||||||
|
calls.map(([, args]) => args),
|
||||||
|
[
|
||||||
|
['-j', 'clients'],
|
||||||
|
['dispatch', 'movewindowpixel', 'exact 0 0,address:0xmatch'],
|
||||||
|
['dispatch', 'resizewindowpixel', 'exact 1920 1080,address:0xmatch'],
|
||||||
|
['dispatch', 'setprop', 'address:0xmatch rounding 0'],
|
||||||
|
['dispatch', 'setprop', 'address:0xmatch border_size 0'],
|
||||||
|
['dispatch', 'setprop', 'address:0xmatch no_shadow 1'],
|
||||||
|
['dispatch', 'setprop', 'address:0xmatch no_blur 1'],
|
||||||
|
['dispatch', 'setprop', 'address:0xmatch decorate 0'],
|
||||||
|
],
|
||||||
|
);
|
||||||
|
});
|
||||||
@@ -0,0 +1,156 @@
|
|||||||
|
import { execFileSync } from 'node:child_process';
|
||||||
|
|
||||||
|
/**
 * Subset of the fields `hyprctl -j clients` reports for a window.
 * Every field is optional because the JSON payload is external input
 * parsed at runtime.
 */
export interface HyprlandPlacementClient {
  // Hyprland window address (e.g. "0xabc"); used to target dispatch commands.
  address?: string;
  floating?: boolean;
  hidden?: boolean;
  initialTitle?: string;
  mapped?: boolean;
  pid?: number;
  pinned?: boolean;
  title?: string;
}

/**
 * Target window geometry in pixels.
 * NOTE(review): assumed to be in Hyprland's global coordinate space — confirm
 * against the caller that supplies mpv fullscreen bounds.
 */
export interface HyprlandPlacementBounds {
  x: number;
  y: number;
  width: number;
  height: number;
}

// Injectable exec type so tests can stub out the hyprctl invocations.
type ExecFileSync = typeof execFileSync;
||||||
|
|
||||||
|
export function shouldAttemptHyprlandWindowPlacement(
|
||||||
|
platform: NodeJS.Platform = process.platform,
|
||||||
|
env: NodeJS.ProcessEnv = process.env,
|
||||||
|
): boolean {
|
||||||
|
return platform === 'linux' && Boolean(env.HYPRLAND_INSTANCE_SIGNATURE);
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseHyprlandClients(output: string): HyprlandPlacementClient[] {
|
||||||
|
const payloadStart = output.indexOf('[');
|
||||||
|
if (payloadStart < 0) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const parsed = JSON.parse(output.slice(payloadStart)) as unknown;
|
||||||
|
return Array.isArray(parsed) ? (parsed as HyprlandPlacementClient[]) : [];
|
||||||
|
}
|
||||||
|
|
||||||
|
export function findHyprlandWindowForPlacement(
|
||||||
|
clients: HyprlandPlacementClient[],
|
||||||
|
options: {
|
||||||
|
pid: number;
|
||||||
|
title: string;
|
||||||
|
},
|
||||||
|
): HyprlandPlacementClient | null {
|
||||||
|
const title = options.title.trim();
|
||||||
|
if (!title) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
clients.find(
|
||||||
|
(client) =>
|
||||||
|
client.pid === options.pid &&
|
||||||
|
client.address &&
|
||||||
|
client.mapped !== false &&
|
||||||
|
client.hidden !== true &&
|
||||||
|
(client.title === title || client.initialTitle === title),
|
||||||
|
) ?? null
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function buildHyprlandPlacementDispatches(
|
||||||
|
client: HyprlandPlacementClient,
|
||||||
|
bounds?: HyprlandPlacementBounds | null,
|
||||||
|
): string[][] {
|
||||||
|
if (!client.address) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const windowAddress = `address:${client.address}`;
|
||||||
|
const dispatches: string[][] = [];
|
||||||
|
if (client.floating !== true) {
|
||||||
|
dispatches.push(['dispatch', 'setfloating', windowAddress]);
|
||||||
|
}
|
||||||
|
if (client.pinned === true) {
|
||||||
|
dispatches.push(['dispatch', 'pin', windowAddress]);
|
||||||
|
}
|
||||||
|
const roundedBounds = roundPlacementBounds(bounds);
|
||||||
|
if (roundedBounds) {
|
||||||
|
dispatches.push([
|
||||||
|
'dispatch',
|
||||||
|
'movewindowpixel',
|
||||||
|
`exact ${roundedBounds.x} ${roundedBounds.y},${windowAddress}`,
|
||||||
|
]);
|
||||||
|
dispatches.push([
|
||||||
|
'dispatch',
|
||||||
|
'resizewindowpixel',
|
||||||
|
`exact ${roundedBounds.width} ${roundedBounds.height},${windowAddress}`,
|
||||||
|
]);
|
||||||
|
dispatches.push(['dispatch', 'setprop', `${windowAddress} rounding 0`]);
|
||||||
|
dispatches.push(['dispatch', 'setprop', `${windowAddress} border_size 0`]);
|
||||||
|
dispatches.push(['dispatch', 'setprop', `${windowAddress} no_shadow 1`]);
|
||||||
|
dispatches.push(['dispatch', 'setprop', `${windowAddress} no_blur 1`]);
|
||||||
|
dispatches.push(['dispatch', 'setprop', `${windowAddress} decorate 0`]);
|
||||||
|
}
|
||||||
|
return dispatches;
|
||||||
|
}
|
||||||
|
|
||||||
|
function roundPlacementBounds(
|
||||||
|
bounds?: HyprlandPlacementBounds | null,
|
||||||
|
): HyprlandPlacementBounds | null {
|
||||||
|
if (!bounds) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
const rounded = {
|
||||||
|
x: Math.round(bounds.x),
|
||||||
|
y: Math.round(bounds.y),
|
||||||
|
width: Math.round(bounds.width),
|
||||||
|
height: Math.round(bounds.height),
|
||||||
|
};
|
||||||
|
return Number.isFinite(rounded.x) &&
|
||||||
|
Number.isFinite(rounded.y) &&
|
||||||
|
Number.isFinite(rounded.width) &&
|
||||||
|
Number.isFinite(rounded.height) &&
|
||||||
|
rounded.width > 0 &&
|
||||||
|
rounded.height > 0
|
||||||
|
? rounded
|
||||||
|
: null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ensureHyprlandWindowFloatingByTitle(options: {
|
||||||
|
title: string;
|
||||||
|
bounds?: HyprlandPlacementBounds | null;
|
||||||
|
platform?: NodeJS.Platform;
|
||||||
|
env?: NodeJS.ProcessEnv;
|
||||||
|
pid?: number;
|
||||||
|
execFileSync?: ExecFileSync;
|
||||||
|
}): boolean {
|
||||||
|
if (!shouldAttemptHyprlandWindowPlacement(options.platform, options.env)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const run = options.execFileSync ?? execFileSync;
|
||||||
|
try {
|
||||||
|
const clients = parseHyprlandClients(
|
||||||
|
String(run('hyprctl', ['-j', 'clients'], { encoding: 'utf-8' })),
|
||||||
|
);
|
||||||
|
const client = findHyprlandWindowForPlacement(clients, {
|
||||||
|
pid: options.pid ?? process.pid,
|
||||||
|
title: options.title,
|
||||||
|
});
|
||||||
|
if (!client) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const dispatches = buildHyprlandPlacementDispatches(client, options.bounds);
|
||||||
|
for (const args of dispatches) {
|
||||||
|
run('hyprctl', args, { stdio: 'ignore' });
|
||||||
|
}
|
||||||
|
return dispatches.length > 0;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -52,7 +52,9 @@ import {
|
|||||||
getKanjiWords,
|
getKanjiWords,
|
||||||
getSessionEvents,
|
getSessionEvents,
|
||||||
getSimilarWords,
|
getSimilarWords,
|
||||||
|
getStatsExcludedWords,
|
||||||
getVocabularyStats,
|
getVocabularyStats,
|
||||||
|
replaceStatsExcludedWords,
|
||||||
getWordAnimeAppearances,
|
getWordAnimeAppearances,
|
||||||
getWordDetail,
|
getWordDetail,
|
||||||
getWordOccurrences,
|
getWordOccurrences,
|
||||||
@@ -151,6 +153,7 @@ import {
|
|||||||
type SessionSummaryQueryRow,
|
type SessionSummaryQueryRow,
|
||||||
type SessionTimelineRow,
|
type SessionTimelineRow,
|
||||||
type SimilarWordRow,
|
type SimilarWordRow,
|
||||||
|
type StatsExcludedWordRow,
|
||||||
type StreakCalendarRow,
|
type StreakCalendarRow,
|
||||||
type VocabularyCleanupSummary,
|
type VocabularyCleanupSummary,
|
||||||
type WatchTimePerAnimeRow,
|
type WatchTimePerAnimeRow,
|
||||||
@@ -289,6 +292,7 @@ export type {
|
|||||||
SessionSummaryQueryRow,
|
SessionSummaryQueryRow,
|
||||||
SessionTimelineRow,
|
SessionTimelineRow,
|
||||||
SimilarWordRow,
|
SimilarWordRow,
|
||||||
|
StatsExcludedWordRow,
|
||||||
StreakCalendarRow,
|
StreakCalendarRow,
|
||||||
WatchTimePerAnimeRow,
|
WatchTimePerAnimeRow,
|
||||||
WordAnimeAppearanceRow,
|
WordAnimeAppearanceRow,
|
||||||
@@ -498,6 +502,14 @@ export class ImmersionTrackerService {
|
|||||||
return getVocabularyStats(this.db, limit, excludePos);
|
return getVocabularyStats(this.db, limit, excludePos);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * Returns the persisted list of words excluded from vocabulary stats.
 * Delegates to the module-level `getStatsExcludedWords` query helper using
 * this service's database handle.
 */
async getStatsExcludedWords(): Promise<StatsExcludedWordRow[]> {
  return getStatsExcludedWords(this.db);
}
|
||||||
|
|
||||||
|
/**
 * Replaces the entire stats-exclusion word list with `words`.
 * NOTE(review): the helper call is not awaited — presumably it is synchronous
 * (the async signature just matches sibling service methods); confirm the
 * helper does not return a Promise.
 */
async replaceStatsExcludedWords(words: StatsExcludedWordRow[]): Promise<void> {
  replaceStatsExcludedWords(this.db, words);
}
|
||||||
|
|
||||||
async cleanupVocabularyStats(): Promise<VocabularyCleanupSummary> {
|
async cleanupVocabularyStats(): Promise<VocabularyCleanupSummary> {
|
||||||
return cleanupVocabularyStats(this.db, {
|
return cleanupVocabularyStats(this.db, {
|
||||||
resolveLegacyPos: this.resolveLegacyVocabularyPos,
|
resolveLegacyPos: this.resolveLegacyVocabularyPos,
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user