fix: exclude kana-only n+1 targets
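In short: N+1 target selection now skips tokens whose surface is entirely kana, and the affected tests assert isNPlusOneTarget as false for kana-only merges. As a rough illustration of what "kana-only" means here, the sketch below uses a Unicode property-escape regex as an approximation; the commit itself checks explicit code-point ranges (see the isKanaChar hunk further down).

// Approximate sketch only, not the commit's implementation: accept hiragana,
// katakana, and the long-vowel mark ー.
const roughKanaOnly = (text: string): boolean =>
  text.trim().length > 0 && /^[\p{sc=Hiragana}\p{sc=Katakana}ー]+$/u.test(text.trim());

console.log(roughKanaOnly('ばあい')); // true  -> excluded from N+1 targeting
console.log(roughKanaOnly('場合'));   // false -> still eligible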
@@ -2306,6 +2306,29 @@ test('tokenizeSubtitle selects one N+1 target token', async () => {
   assert.equal(targets[0]?.surface, '犬');
 });
 
+test('tokenizeSubtitle does not select kana-only N+1 target tokens', async () => {
+  const result = await tokenizeSubtitle(
+    '私のばあい',
+    makeDepsFromYomitanTokens(
+      [
+        { surface: '私', reading: 'わたし', headword: '私' },
+        { surface: 'の', reading: 'の', headword: 'の' },
+        { surface: 'ばあい', reading: 'ばあい', headword: '場合' },
+      ],
+      {
+        getMinSentenceWordsForNPlusOne: () => 2,
+        isKnownWord: (text) => text === '私',
+      },
+    ),
+  );
+
+  assert.equal(result.tokens?.length, 3);
+  assert.equal(
+    result.tokens?.some((token) => token.isNPlusOneTarget),
+    false,
+  );
+});
+
 test('tokenizeSubtitle does not mark target when sentence has multiple candidates', async () => {
   const result = await tokenizeSubtitle(
     '猫犬',
@@ -3040,15 +3063,18 @@ test('tokenizeSubtitle uses Yomitan word classes to classify standalone particle
   let mecabCalls = 0;
   const result = await tokenizeSubtitle(
     'は',
-    makeDepsFromYomitanTokens([{ surface: 'は', reading: 'は', headword: 'は', wordClasses: ['prt'] }], {
-      getFrequencyDictionaryEnabled: () => true,
-      getFrequencyRank: (text) => (text === 'は' ? 10 : null),
-      getJlptLevel: (text) => (text === 'は' ? 'N5' : null),
-      tokenizeWithMecab: async () => {
-        mecabCalls += 1;
-        return null;
-      },
-    }),
+    makeDepsFromYomitanTokens(
+      [{ surface: 'は', reading: 'は', headword: 'は', wordClasses: ['prt'] }],
+      {
+        getFrequencyDictionaryEnabled: () => true,
+        getFrequencyRank: (text) => (text === 'は' ? 10 : null),
+        getJlptLevel: (text) => (text === 'は' ? 'N5' : null),
+        tokenizeWithMecab: async () => {
+          mecabCalls += 1;
+          return null;
+        },
+      },
+    ),
   );
 
   assert.equal(mecabCalls, 1);
@@ -3063,24 +3089,27 @@ test('tokenizeSubtitle uses Yomitan word classes to classify standalone particle
 test('tokenizeSubtitle fills detailed MeCab POS when Yomitan word class supplies coarse POS', async () => {
   const result = await tokenizeSubtitle(
     'は',
-    makeDepsFromYomitanTokens([{ surface: 'は', reading: 'は', headword: 'は', wordClasses: ['prt'] }], {
-      tokenizeWithMecab: async () => [
-        {
-          headword: 'は',
-          surface: 'は',
-          reading: 'ハ',
-          startPos: 0,
-          endPos: 1,
-          partOfSpeech: PartOfSpeech.particle,
-          pos1: '助詞',
-          pos2: '係助詞',
-          pos3: '*',
-          isMerged: false,
-          isKnown: false,
-          isNPlusOneTarget: false,
-        },
-      ],
-    }),
+    makeDepsFromYomitanTokens(
+      [{ surface: 'は', reading: 'は', headword: 'は', wordClasses: ['prt'] }],
+      {
+        tokenizeWithMecab: async () => [
+          {
+            headword: 'は',
+            surface: 'は',
+            reading: 'ハ',
+            startPos: 0,
+            endPos: 1,
+            partOfSpeech: PartOfSpeech.particle,
+            pos1: '助詞',
+            pos2: '係助詞',
+            pos3: '*',
+            isMerged: false,
+            isKnown: false,
+            isNPlusOneTarget: false,
+          },
+        ],
+      },
+    ),
   );
 
   assert.equal(result.tokens?.[0]?.partOfSpeech, PartOfSpeech.particle);
@@ -3682,7 +3711,7 @@ test('tokenizeSubtitle excludes single-kana merged tokens from frequency highlig
   assert.equal(result.tokens?.[0]?.frequencyRank, undefined);
 });
 
-test('tokenizeSubtitle excludes merged function/content token from frequency highlighting but keeps N+1', async () => {
+test('tokenizeSubtitle excludes merged kana-only function/content token from frequency and N+1', async () => {
   const result = await tokenizeSubtitle(
     'になれば',
     makeDepsFromYomitanTokens([{ surface: 'になれば', reading: 'になれば', headword: 'なる' }], {
@@ -3736,7 +3765,7 @@ test('tokenizeSubtitle excludes merged function/content token from frequency hig
   assert.equal(result.tokens?.length, 1);
   assert.equal(result.tokens?.[0]?.pos1, '助詞|動詞');
   assert.equal(result.tokens?.[0]?.frequencyRank, undefined);
-  assert.equal(result.tokens?.[0]?.isNPlusOneTarget, true);
+  assert.equal(result.tokens?.[0]?.isNPlusOneTarget, false);
 });
 
 test('tokenizeSubtitle clears all annotations for kana-only demonstrative helper merges', async () => {
@@ -3935,7 +3964,7 @@ test('tokenizeSubtitle clears all annotations for explanatory pondering endings'
     surface: 'どうかしちゃった',
     headword: 'どうかしちゃう',
     isKnown: false,
-    isNPlusOneTarget: true,
+    isNPlusOneTarget: false,
     frequencyRank: 3200,
     jlptLevel: 'N3',
   },

@@ -570,13 +570,13 @@ test('annotateTokens keeps other annotations for name matches when name highligh
   let jlptLookupCalls = 0;
   const tokens = [
     makeToken({
-      surface: 'オリヴィア',
-      reading: 'オリヴィア',
-      headword: 'オリヴィア',
+      surface: '山田',
+      reading: 'ヤマダ',
+      headword: '山田',
       isNameMatch: true,
       frequencyRank: 42,
       startPos: 0,
-      endPos: 5,
+      endPos: 2,
     }),
   ];
 
@@ -770,7 +770,7 @@ test('annotateTokens allows previously default-excluded pos1 when removed from e
   });
 
   assert.equal(result[0]?.frequencyRank, 8);
-  assert.equal(result[0]?.isNPlusOneTarget, true);
+  assert.equal(result[0]?.isNPlusOneTarget, false);
 });
 
 test('annotateTokens excludes default non-independent pos2 from frequency and N+1', () => {
@@ -787,13 +787,9 @@ test('annotateTokens excludes default non-independent pos2 from frequency and N+
     }),
   ];
 
-  const result = annotateTokens(
-    tokens,
-    makeDeps(),
-    {
-      minSentenceWordsForNPlusOne: 1,
-    },
-  );
+  const result = annotateTokens(tokens, makeDeps(), {
+    minSentenceWordsForNPlusOne: 1,
+  });
 
   assert.equal(result[0]?.frequencyRank, undefined);
   assert.equal(result[0]?.isNPlusOneTarget, false);
@@ -969,10 +965,10 @@ test('annotateTokens allows previously default-excluded pos2 when removed from e
   });
 
   assert.equal(result[0]?.frequencyRank, 9);
-  assert.equal(result[0]?.isNPlusOneTarget, true);
+  assert.equal(result[0]?.isNPlusOneTarget, false);
 });
 
-test('annotateTokens excludes composite function/content tokens from frequency but keeps N+1 eligible', () => {
+test('annotateTokens excludes kana-only composite function/content tokens from frequency and N+1', () => {
   const tokens = [
     makeToken({
       surface: 'になれば',
@@ -990,7 +986,7 @@ test('annotateTokens excludes composite function/content tokens from frequency b
   });
 
   assert.equal(result[0]?.frequencyRank, undefined);
-  assert.equal(result[0]?.isNPlusOneTarget, true);
+  assert.equal(result[0]?.isNPlusOneTarget, false);
 });
 
 test('annotateTokens excludes composite tokens when all component pos tags are excluded', () => {

@@ -282,6 +282,26 @@ function isExcludedByTagSet(normalizedTag: string, exclusions: ReadonlySet<strin
   return parts.every((part) => exclusions.has(part));
 }
 
+function isKanaChar(char: string): boolean {
+  const code = char.codePointAt(0);
+  if (code === undefined) {
+    return false;
+  }
+
+  return (
+    (code >= 0x3041 && code <= 0x3096) ||
+    (code >= 0x309b && code <= 0x309f) ||
+    code === 0x30fc ||
+    (code >= 0x30a0 && code <= 0x30fa) ||
+    (code >= 0x30fd && code <= 0x30ff)
+  );
+}
+
+function isKanaOnlyText(text: string): boolean {
+  const normalized = text.trim();
+  return normalized.length > 0 && Array.from(normalized).every((char) => isKanaChar(char));
+}
+
 export function isNPlusOneCandidateToken(
   token: MergedToken,
   pos1Exclusions: ReadonlySet<string> = N_PLUS_ONE_IGNORED_POS1,
@@ -290,6 +310,9 @@ export function isNPlusOneCandidateToken(
   if (token.isKnown) {
     return false;
   }
+  if (isKanaOnlyText(token.surface)) {
+    return false;
+  }
   return isNPlusOneWordCountToken(token, pos1Exclusions, pos2Exclusions);
 }
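For reference, the new guard can be exercised in isolation. The sketch below copies isKanaChar and isKanaOnlyText from the hunk above into a standalone file (the file layout and the node:assert usage are assumptions for illustration, not part of the commit) and checks the surfaces exercised by the updated tests.

import assert from 'node:assert/strict';

// Same code-point ranges as the commit: hiragana, kana voicing/iteration marks,
// the long-vowel mark ー, and the katakana block.
function isKanaChar(char: string): boolean {
  const code = char.codePointAt(0);
  if (code === undefined) {
    return false;
  }
  return (
    (code >= 0x3041 && code <= 0x3096) ||
    (code >= 0x309b && code <= 0x309f) ||
    code === 0x30fc ||
    (code >= 0x30a0 && code <= 0x30fa) ||
    (code >= 0x30fd && code <= 0x30ff)
  );
}

function isKanaOnlyText(text: string): boolean {
  // Trim first so whitespace-only surfaces are not treated as kana-only.
  const normalized = text.trim();
  return normalized.length > 0 && Array.from(normalized).every((char) => isKanaChar(char));
}

// Mirrors the expectations encoded in the updated tests.
assert.equal(isKanaOnlyText('ばあい'), true);   // kana-only surface -> no longer an N+1 target
assert.equal(isKanaOnlyText('になれば'), true); // merged kana-only composite -> no longer an N+1 target
assert.equal(isKanaOnlyText('場合'), false);    // contains kanji -> still an N+1 candidate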