feat: integrate n+1 target highlighting

- Merge feature branch changes for n+1 target-only highlight flow

- Extend merged token model and token-merger to mark exactly-one-unknown (n+1) targets

- Thread n+1 candidate metadata through tokenizer and config systems

- Update subtitle renderer/state to route configured colors and new token class

- Resolve merge conflicts in core service tests, including subtitle and subsync behavior
This commit is contained in:
2026-02-15 02:36:48 -08:00
parent 88099e2ffa
commit 3a27c026b6
16 changed files with 494 additions and 66 deletions

View File

@@ -1,5 +1,5 @@
import { BrowserWindow, Extension, session } from "electron";
import { mergeTokens } from "../../token-merger";
import { markNPlusOneTargets, mergeTokens } from "../../token-merger";
import {
MergedToken,
NPlusOneMatchMode,
@@ -93,6 +93,25 @@ function resolveKnownWordText(
return matchMode === "surface" ? surface : headword;
}
/**
 * Returns a new token array in which each token's `isKnown` flag is OR-ed
 * with the result of looking up its match text via `isKnownWord`.
 *
 * The text used for the lookup is chosen per `knownWordMatchMode` (surface
 * form vs. headword) by `resolveKnownWordText`; when no match text resolves,
 * the token's existing `isKnown` value is kept unchanged.
 *
 * @param tokens - merged tokens to annotate (not mutated)
 * @param isKnownWord - predicate deciding whether a word is already known
 * @param knownWordMatchMode - whether matching uses surface form or headword
 * @returns a fresh array of tokens with updated `isKnown` flags
 */
function applyKnownWordMarking(
  tokens: MergedToken[],
  isKnownWord: (text: string) => boolean,
  knownWordMatchMode: NPlusOneMatchMode,
): MergedToken[] {
  const marked: MergedToken[] = [];
  for (const token of tokens) {
    const lookupText = resolveKnownWordText(
      token.surface,
      token.headword,
      knownWordMatchMode,
    );
    // Empty/undefined lookup text means we cannot consult the known-word
    // list for this token, so only the pre-existing flag counts.
    const foundInKnownList = lookupText ? isKnownWord(lookupText) : false;
    marked.push({ ...token, isKnown: token.isKnown || foundInKnownList });
  }
  return marked;
}
function extractYomitanHeadword(segment: YomitanParseSegment): string {
const headwords = segment.headwords;
if (!Array.isArray(headwords) || headwords.length === 0) {
@@ -187,6 +206,7 @@ function mapYomitanParseResultsToMergedTokens(
endPos: end,
partOfSpeech: PartOfSpeech.other,
isMerged: true,
isNPlusOneTarget: false,
isKnown: (() => {
const matchText = resolveKnownWordText(
surface,
@@ -368,13 +388,23 @@ export async function tokenizeSubtitleService(
const yomitanTokens = await parseWithYomitanInternalParser(tokenizeText, deps);
if (yomitanTokens && yomitanTokens.length > 0) {
return { text: displayText, tokens: yomitanTokens };
const knownMarkedTokens = applyKnownWordMarking(
yomitanTokens,
deps.isKnownWord,
deps.getKnownWordMatchMode(),
);
return { text: displayText, tokens: markNPlusOneTargets(knownMarkedTokens) };
}
try {
const mecabTokens = await deps.tokenizeWithMecab(tokenizeText);
if (mecabTokens && mecabTokens.length > 0) {
return { text: displayText, tokens: mecabTokens };
const knownMarkedTokens = applyKnownWordMarking(
mecabTokens,
deps.isKnownWord,
deps.getKnownWordMatchMode(),
);
return { text: displayText, tokens: markNPlusOneTargets(knownMarkedTokens) };
}
} catch (err) {
console.error("Tokenization error:", (err as Error).message);