Fix PR #60 CI failures and address CodeRabbit feedback

- Restore raw tokensSeen for session summaries; keep filtered counts for aggregates/known-words
- Fix missing headword binding in insertFilteredWordOccurrence test fixture
- Page vocabulary stats until enough visible rows collected after post-query filtering
- Use lifetime totals for library/detail word counts instead of partial retained-session sums
- Prefer stored rollup totals over recomputed session counts when recomputation is partial
- Emit flat known-word timeline points for line indexes with no occurrences
- Roll back local excluded-word state and throw on failed persistence
- Reset initialized flag on load failure to allow retry on next call
- Restore globalThis.localStorage after each excluded-words test
This commit is contained in:
2026-05-03 20:00:10 -07:00
parent 25d0aa47db
commit e241aa8c86
10 changed files with 426 additions and 74 deletions
@@ -463,7 +463,9 @@ describe('stats server API routes', () => {
const res = await app.request('/api/stats/sessions/1/known-words-timeline');
assert.equal(res.status, 200);
assert.deepEqual(await res.json(), [
{ linesSeen: 0, knownWordsSeen: 0, totalWordsSeen: 0 },
{ linesSeen: 1, knownWordsSeen: 2, totalWordsSeen: 2 },
{ linesSeen: 2, knownWordsSeen: 2, totalWordsSeen: 2 },
{ linesSeen: 3, knownWordsSeen: 3, totalWordsSeen: 7 },
]);
});
@@ -139,6 +139,7 @@ function insertFilteredWordOccurrence(
RETURNING id`,
)
.get(
headword,
word,
options.reading ?? '',
options.pos1 ?? '名詞',
@@ -1371,7 +1372,7 @@ test('word-count read models use filtered persisted occurrences with raw fallbac
const summaries = getSessionSummaries(db, 10);
assert.equal(
summaries.find((session) => session.sessionId === withOccurrences.sessionId)?.tokensSeen,
2,
5,
);
assert.equal(
summaries.find((session) => session.sessionId === fallbackOnly.sessionId)?.tokensSeen,
@@ -1382,8 +1383,69 @@ test('word-count read models use filtered persisted occurrences with raw fallbac
assert.equal(hints.totalTokensSeen, 9);
const rollup = getDailyRollups(db, 1)[0]!;
assert.equal(rollup.totalTokensSeen, 9);
assert.equal(rollup.tokensPerMin, 9);
assert.equal(rollup.totalTokensSeen, 12);
assert.equal(rollup.tokensPerMin, 12);
} finally {
db.close();
cleanupDbPath(dbPath);
}
});
test('rollups keep persisted totals when retained-session word counts are partial', () => {
const dbPath = makeDbPath();
const db = new Database(dbPath);
try {
ensureSchema(db);
const videoId = getOrCreateVideoRecord(db, 'local:/tmp/partial-rollup.mkv', {
canonicalTitle: 'Partial Rollup',
sourcePath: '/tmp/partial-rollup.mkv',
sourceUrl: null,
sourceType: SOURCE_TYPE_LOCAL,
});
const startedAtMs = 1_700_000_000_000;
const { sessionId } = startSessionRecord(db, videoId, startedAtMs);
db.prepare(
`
UPDATE imm_sessions
SET ended_at_ms = ?, status = 2, active_watched_ms = ?, tokens_seen = ?
WHERE session_id = ?
`,
).run(startedAtMs + 60_000, 60_000, 4, sessionId);
insertFilteredWordOccurrence(db, {
sessionId,
videoId,
occurrenceCount: 4,
startedAtMs,
});
const rollupDay = Math.floor(startedAtMs / 86_400_000);
db.prepare(
`
INSERT INTO imm_daily_rollups (
rollup_day, video_id, total_sessions, total_active_min, total_lines_seen,
total_tokens_seen, total_cards
) VALUES (?, ?, ?, ?, ?, ?, ?)
`,
).run(rollupDay, videoId, 2, 2, 8, 12, 0);
db.prepare(
`
INSERT INTO imm_monthly_rollups (
rollup_month, video_id, total_sessions, total_active_min, total_lines_seen,
total_tokens_seen, total_cards
) VALUES (?, ?, ?, ?, ?, ?, ?)
`,
).run(202311, videoId, 2, 3, 12, 18, 0);
const daily = getDailyRollups(db, 1)[0]!;
assert.equal(daily.totalTokensSeen, 12);
assert.equal(daily.tokensPerMin, 6);
const monthly = getMonthlyRollups(db, 1)[0]!;
assert.equal(monthly.totalTokensSeen, 18);
assert.equal(monthly.tokensPerMin, 6);
} finally {
db.close();
cleanupDbPath(dbPath);
@@ -1639,6 +1701,41 @@ test('getVocabularyStats filters rows that fail tokenizer vocabulary rules', ()
}
});
test('getVocabularyStats pages past hidden rows until enough visible rows are collected', () => {
const dbPath = makeDbPath();
const db = new Database(dbPath);
try {
ensureSchema(db);
const stmts = createTrackerPreparedStatements(db);
for (let i = 0; i < 105; i += 1) {
stmts.wordUpsertStmt.run(
`助詞${i}`,
`助詞${i}`,
`じょし${i}`,
'particle',
'助詞',
'格助詞',
'',
10_000 - i,
1_000,
);
}
stmts.wordUpsertStmt.run('猫', '猫', 'ねこ', 'noun', '名詞', '一般', '', 1, 1_000);
const rows = getVocabularyStats(db, 1);
assert.deepEqual(
rows.map((row) => row.headword),
['猫'],
);
} finally {
db.close();
cleanupDbPath(dbPath);
}
});
test('getVocabularyStats returns empty array when no words exist', () => {
const dbPath = makeDbPath();
const db = new Database(dbPath);
@@ -2863,6 +2960,96 @@ test('anime library and detail still return lifetime rows without retained sessi
}
});
// Regression test: when only part of the session history is retained locally,
// the library/detail queries must report the stored lifetime totals
// (total_tokens_seen = 100) rather than the smaller sum recomputed from the
// retained sessions alone (a single session with tokens_seen = 10).
test('anime and media detail prefer lifetime totals over partial retained sessions', () => {
const dbPath = makeDbPath();
const db = new Database(dbPath);
try {
ensureSchema(db);
// Minimal anime record; external metadata is intentionally absent.
const animeId = getOrCreateAnimeRecord(db, {
parsedTitle: 'Partial History Anime',
canonicalTitle: 'Partial History Anime',
anilistId: null,
titleRomaji: null,
titleEnglish: null,
titleNative: null,
metadataJson: null,
});
// One local video linked to that anime as season 1 episode 1.
const videoId = getOrCreateVideoRecord(db, 'local:/tmp/partial-history.mkv', {
canonicalTitle: 'Partial History Episode',
sourcePath: '/tmp/partial-history.mkv',
sourceUrl: null,
sourceType: SOURCE_TYPE_LOCAL,
});
linkVideoToAnimeRecord(db, videoId, {
animeId,
parsedBasename: 'Partial History Episode',
parsedTitle: 'Partial History Anime',
parsedSeason: 1,
parsedEpisode: 1,
parserSource: 'fallback',
parserConfidence: 1,
parseMetadataJson: null,
});
const startedAtMs = 1_700_000_000_000;
// The single retained session: finished (status = 2) with only 10 tokens —
// deliberately far below the lifetime totals inserted below.
const { sessionId } = startSessionRecord(db, videoId, startedAtMs);
db.prepare(
`
UPDATE imm_sessions
SET ended_at_ms = ?, status = 2, active_watched_ms = ?, tokens_seen = ?
WHERE session_id = ?
`,
).run(startedAtMs + 30_000, 30_000, 10, sessionId);
const now = Date.now();
// Lifetime anime rollup claims 3 sessions and 100 tokens, i.e. it covers
// sessions that are no longer retained in imm_sessions.
db.prepare(
`
INSERT INTO imm_lifetime_anime (
anime_id,
total_sessions,
total_active_ms,
total_cards,
total_lines_seen,
total_tokens_seen,
episodes_started,
episodes_completed,
first_watched_ms,
last_watched_ms,
CREATED_DATE,
LAST_UPDATE_DATE
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`,
).run(animeId, 3, 90_000, 1, 12, 100, 1, 0, startedAtMs, startedAtMs, now, now);
// Matching lifetime media rollup for the individual video.
db.prepare(
`
INSERT INTO imm_lifetime_media (
video_id,
total_sessions,
total_active_ms,
total_cards,
total_lines_seen,
total_tokens_seen,
completed,
first_watched_ms,
last_watched_ms,
CREATED_DATE,
LAST_UPDATE_DATE
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`,
).run(videoId, 3, 90_000, 1, 12, 100, 0, startedAtMs, startedAtMs, now, now);
// All four read models must surface the lifetime total (100), not the
// partial retained-session sum (10).
assert.equal(getAnimeLibrary(db)[0]?.totalTokensSeen, 100);
assert.equal(getAnimeDetail(db, animeId)?.totalTokensSeen, 100);
assert.equal(getMediaLibrary(db)[0]?.totalTokensSeen, 100);
assert.equal(getMediaDetail(db, videoId)?.totalTokensSeen, 100);
} finally {
db.close();
cleanupDbPath(dbPath);
}
});
test('media library and detail queries read lifetime totals', () => {
const dbPath = makeDbPath();
const db = new Database(dbPath);
@@ -4209,7 +4396,7 @@ test('getTrendsDashboard librarySummary returns null lookupsPerHundred when word
}
});
test('getTrendsDashboard word metrics use filtered persisted occurrences', () => {
test('getTrendsDashboard rollup word metrics keep persisted totals over partial session counts', () => {
const dbPath = makeDbPath();
const db = new Database(dbPath);
@@ -4308,16 +4495,16 @@ test('getTrendsDashboard word metrics use filtered persisted occurrences', () =>
const dashboard = getTrendsDashboard(db, 'all', 'day');
assert.deepEqual(
dashboard.activity.words.map((point) => point.value),
[2, 3],
[10, 20],
);
assert.deepEqual(
dashboard.progress.words.map((point) => point.value),
[2, 5],
[10, 30],
);
assert.equal(dashboard.ratios.lookupsPerHundred[0]?.value, 200);
assert.equal(dashboard.librarySummary[0]?.words, 5);
assert.equal(dashboard.librarySummary[0]?.lookupsPerHundred, 200);
assert.equal(dashboard.animeCumulative.words.at(-1)?.value, 5);
assert.equal(dashboard.librarySummary[0]?.words, 30);
assert.equal(dashboard.librarySummary[0]?.lookupsPerHundred, 33.3);
assert.equal(dashboard.animeCumulative.words.at(-1)?.value, 30);
} finally {
db.close();
cleanupDbPath(dbPath);
@@ -53,7 +53,7 @@ export function getVocabularyStats(
limit = 100,
excludePos?: string[],
): VocabularyStatsRow[] {
const queryLimit = Math.max(
const pageSize = Math.max(
limit,
limit * VOCABULARY_STATS_FILTER_OVERSAMPLE_FACTOR,
limit + VOCABULARY_STATS_FILTER_OVERSAMPLE_MIN,
@@ -74,12 +74,20 @@ export function getVocabularyStats(
LEFT JOIN imm_subtitle_lines sl ON sl.line_id = o.line_id AND sl.anime_id IS NOT NULL
${whereClause ? whereClause.replace('part_of_speech', 'w.part_of_speech') : ''}
GROUP BY w.id
ORDER BY w.frequency DESC LIMIT ?
ORDER BY w.frequency DESC LIMIT ? OFFSET ?
`);
const params = hasExclude ? [...excludePos, queryLimit] : [queryLimit];
return (stmt.all(...params) as VocabularyStatsRow[])
.filter(isVocabularyStatsRowVisible)
.slice(0, limit);
const visibleRows: VocabularyStatsRow[] = [];
let offset = 0;
while (visibleRows.length < limit) {
const params = hasExclude ? [...excludePos, pageSize, offset] : [pageSize, offset];
const page = stmt.all(...params) as VocabularyStatsRow[];
if (page.length === 0) break;
visibleRows.push(...page.filter(isVocabularyStatsRowVisible));
offset += page.length;
}
return visibleRows.slice(0, limit);
}
export function getStatsExcludedWords(db: DatabaseSync): StatsExcludedWordRow[] {
@@ -27,20 +27,9 @@ import {
} from './query-shared';
export function getAnimeLibrary(db: DatabaseSync): AnimeLibraryRow[] {
const wordsExpr = sessionDisplayWordsExpr('s', 'swc');
const rows = db
.prepare(
`
${SESSION_WORD_COUNTS_CTE},
anime_word_counts AS (
SELECT v.anime_id AS animeId, SUM(${wordsExpr}) AS totalTokensSeen
FROM imm_sessions s
JOIN imm_videos v ON v.video_id = s.video_id
LEFT JOIN session_word_counts swc ON swc.sessionId = s.session_id
WHERE s.ended_at_ms IS NOT NULL
AND v.anime_id IS NOT NULL
GROUP BY v.anime_id
)
SELECT
a.anime_id AS animeId,
a.canonical_title AS canonicalTitle,
@@ -48,14 +37,13 @@ export function getAnimeLibrary(db: DatabaseSync): AnimeLibraryRow[] {
COALESCE(lm.total_sessions, 0) AS totalSessions,
COALESCE(lm.total_active_ms, 0) AS totalActiveMs,
COALESCE(lm.total_cards, 0) AS totalCards,
COALESCE(awc.totalTokensSeen, lm.total_tokens_seen, 0) AS totalTokensSeen,
COALESCE(lm.total_tokens_seen, 0) AS totalTokensSeen,
COUNT(DISTINCT v.video_id) AS episodeCount,
a.episodes_total AS episodesTotal,
COALESCE(lm.last_watched_ms, 0) AS lastWatchedMs
FROM imm_anime a
JOIN imm_lifetime_anime lm ON lm.anime_id = a.anime_id
JOIN imm_videos v ON v.anime_id = a.anime_id
LEFT JOIN anime_word_counts awc ON awc.animeId = a.anime_id
GROUP BY a.anime_id
ORDER BY totalActiveMs DESC, lm.last_watched_ms DESC, canonicalTitle ASC
`,
@@ -68,7 +56,6 @@ export function getAnimeLibrary(db: DatabaseSync): AnimeLibraryRow[] {
}
export function getAnimeDetail(db: DatabaseSync, animeId: number): AnimeDetailRow | null {
const wordsExpr = sessionDisplayWordsExpr('s', 'swc', 'COALESCE(asm.tokensSeen, s.tokens_seen)');
const row = db
.prepare(
`
@@ -84,10 +71,7 @@ export function getAnimeDetail(db: DatabaseSync, animeId: number): AnimeDetailRo
COALESCE(lm.total_sessions, 0) AS totalSessions,
COALESCE(lm.total_active_ms, 0) AS totalActiveMs,
COALESCE(lm.total_cards, 0) AS totalCards,
CASE
WHEN COUNT(s.session_id) > 0 THEN COALESCE(SUM(${wordsExpr}), 0)
ELSE COALESCE(lm.total_tokens_seen, 0)
END AS totalTokensSeen,
COALESCE(lm.total_tokens_seen, 0) AS totalTokensSeen,
COALESCE(lm.total_lines_seen, 0) AS totalLinesSeen,
COALESCE(SUM(COALESCE(asm.lookupCount, s.lookup_count, 0)), 0) AS totalLookupCount,
COALESCE(SUM(COALESCE(asm.lookupHits, s.lookup_hits, 0)), 0) AS totalLookupHits,
@@ -99,7 +83,6 @@ export function getAnimeDetail(db: DatabaseSync, animeId: number): AnimeDetailRo
JOIN imm_videos v ON v.anime_id = a.anime_id
LEFT JOIN imm_sessions s ON s.video_id = v.video_id
LEFT JOIN active_session_metrics asm ON asm.sessionId = s.session_id
LEFT JOIN session_word_counts swc ON swc.sessionId = s.session_id
WHERE a.anime_id = ?
GROUP BY a.anime_id
`,
@@ -219,25 +202,16 @@ export function getAnimeEpisodes(db: DatabaseSync, animeId: number): AnimeEpisod
}
export function getMediaLibrary(db: DatabaseSync): MediaLibraryRow[] {
const wordsExpr = sessionDisplayWordsExpr('s', 'swc');
const rows = db
.prepare(
`
${SESSION_WORD_COUNTS_CTE},
media_word_counts AS (
SELECT s.video_id AS videoId, SUM(${wordsExpr}) AS totalTokensSeen
FROM imm_sessions s
LEFT JOIN session_word_counts swc ON swc.sessionId = s.session_id
WHERE s.ended_at_ms IS NOT NULL
GROUP BY s.video_id
)
SELECT
v.video_id AS videoId,
v.canonical_title AS canonicalTitle,
COALESCE(lm.total_sessions, 0) AS totalSessions,
COALESCE(lm.total_active_ms, 0) AS totalActiveMs,
COALESCE(lm.total_cards, 0) AS totalCards,
COALESCE(mwc.totalTokensSeen, lm.total_tokens_seen, 0) AS totalTokensSeen,
COALESCE(lm.total_tokens_seen, 0) AS totalTokensSeen,
COALESCE(lm.last_watched_ms, 0) AS lastWatchedMs,
yv.youtube_video_id AS youtubeVideoId,
yv.video_url AS videoUrl,
@@ -256,7 +230,6 @@ export function getMediaLibrary(db: DatabaseSync): MediaLibraryRow[] {
END AS hasCoverArt
FROM imm_videos v
JOIN imm_lifetime_media lm ON lm.video_id = v.video_id
LEFT JOIN media_word_counts mwc ON mwc.videoId = v.video_id
LEFT JOIN imm_media_art ma ON ma.video_id = v.video_id
LEFT JOIN imm_youtube_videos yv ON yv.video_id = v.video_id
ORDER BY lm.last_watched_ms DESC
@@ -270,7 +243,6 @@ export function getMediaLibrary(db: DatabaseSync): MediaLibraryRow[] {
}
export function getMediaDetail(db: DatabaseSync, videoId: number): MediaDetailRow | null {
const wordsExpr = sessionDisplayWordsExpr('s', 'swc', 'COALESCE(asm.tokensSeen, s.tokens_seen)');
return db
.prepare(
`
@@ -282,10 +254,7 @@ export function getMediaDetail(db: DatabaseSync, videoId: number): MediaDetailRo
COALESCE(lm.total_sessions, 0) AS totalSessions,
COALESCE(lm.total_active_ms, 0) AS totalActiveMs,
COALESCE(lm.total_cards, 0) AS totalCards,
CASE
WHEN COUNT(s.session_id) > 0 THEN COALESCE(SUM(${wordsExpr}), 0)
ELSE COALESCE(lm.total_tokens_seen, 0)
END AS totalTokensSeen,
COALESCE(lm.total_tokens_seen, 0) AS totalTokensSeen,
COALESCE(lm.total_lines_seen, 0) AS totalLinesSeen,
COALESCE(SUM(COALESCE(asm.lookupCount, s.lookup_count, 0)), 0) AS totalLookupCount,
COALESCE(SUM(COALESCE(asm.lookupHits, s.lookup_hits, 0)), 0) AS totalLookupHits,
@@ -306,7 +275,6 @@ export function getMediaDetail(db: DatabaseSync, videoId: number): MediaDetailRo
LEFT JOIN imm_youtube_videos yv ON yv.video_id = v.video_id
LEFT JOIN imm_sessions s ON s.video_id = v.video_id
LEFT JOIN active_session_metrics asm ON asm.sessionId = s.session_id
LEFT JOIN session_word_counts swc ON swc.sessionId = s.session_id
WHERE v.video_id = ?
GROUP BY v.video_id
`,
@@ -398,11 +366,19 @@ export function getMediaDailyRollups(
total_sessions AS totalSessions,
total_active_min AS totalActiveMin,
total_lines_seen AS totalLinesSeen,
COALESCE(dwc.totalTokensSeen, total_tokens_seen) AS totalTokensSeen,
CASE
WHEN dwc.totalTokensSeen IS NOT NULL AND dwc.totalTokensSeen > total_tokens_seen THEN dwc.totalTokensSeen
ELSE total_tokens_seen
END AS totalTokensSeen,
total_cards AS totalCards,
cards_per_hour AS cardsPerHour,
CASE
WHEN total_active_min > 0 THEN COALESCE(dwc.totalTokensSeen, total_tokens_seen) * 1.0 / total_active_min
WHEN total_active_min > 0 THEN (
CASE
WHEN dwc.totalTokensSeen IS NOT NULL AND dwc.totalTokensSeen > total_tokens_seen THEN dwc.totalTokensSeen
ELSE total_tokens_seen
END
) * 1.0 / total_active_min
ELSE NULL
END AS tokensPerMin,
lookup_hit_rate AS lookupHitRate
@@ -454,11 +430,19 @@ export function getAnimeDailyRollups(
SELECT r.rollup_day AS rollupDayOrMonth, r.video_id AS videoId,
r.total_sessions AS totalSessions, r.total_active_min AS totalActiveMin,
r.total_lines_seen AS totalLinesSeen,
COALESCE(dwc.totalTokensSeen, r.total_tokens_seen) AS totalTokensSeen,
CASE
WHEN dwc.totalTokensSeen IS NOT NULL AND dwc.totalTokensSeen > r.total_tokens_seen THEN dwc.totalTokensSeen
ELSE r.total_tokens_seen
END AS totalTokensSeen,
r.total_cards AS totalCards,
r.cards_per_hour AS cardsPerHour,
CASE
WHEN r.total_active_min > 0 THEN COALESCE(dwc.totalTokensSeen, r.total_tokens_seen) * 1.0 / r.total_active_min
WHEN r.total_active_min > 0 THEN (
CASE
WHEN dwc.totalTokensSeen IS NOT NULL AND dwc.totalTokensSeen > r.total_tokens_seen THEN dwc.totalTokensSeen
ELSE r.total_tokens_seen
END
) * 1.0 / r.total_active_min
ELSE NULL
END AS tokensPerMin,
r.lookup_hit_rate AS lookupHitRate
@@ -17,7 +17,6 @@ import {
} from './query-shared';
export function getSessionSummaries(db: DatabaseSync, limit = 50): SessionSummaryQueryRow[] {
const wordsExpr = sessionDisplayWordsExpr('s', 'swc', 'COALESCE(asm.tokensSeen, s.tokens_seen)');
const prepared = db.prepare(`
${ACTIVE_SESSION_METRICS_CTE}
SELECT
@@ -31,14 +30,13 @@ export function getSessionSummaries(db: DatabaseSync, limit = 50): SessionSummar
COALESCE(asm.totalWatchedMs, s.total_watched_ms, 0) AS totalWatchedMs,
COALESCE(asm.activeWatchedMs, s.active_watched_ms, 0) AS activeWatchedMs,
COALESCE(asm.linesSeen, s.lines_seen, 0) AS linesSeen,
${wordsExpr} AS tokensSeen,
COALESCE(asm.tokensSeen, s.tokens_seen, 0) AS tokensSeen,
COALESCE(asm.cardsMined, s.cards_mined, 0) AS cardsMined,
COALESCE(asm.lookupCount, s.lookup_count, 0) AS lookupCount,
COALESCE(asm.lookupHits, s.lookup_hits, 0) AS lookupHits,
COALESCE(asm.yomitanLookupCount, s.yomitan_lookup_count, 0) AS yomitanLookupCount
FROM imm_sessions s
LEFT JOIN active_session_metrics asm ON asm.sessionId = s.session_id
LEFT JOIN session_word_counts swc ON swc.sessionId = s.session_id
LEFT JOIN imm_videos v ON v.video_id = s.video_id
LEFT JOIN imm_anime a ON a.anime_id = v.anime_id
ORDER BY s.started_at_ms DESC
@@ -382,11 +380,19 @@ export function getDailyRollups(db: DatabaseSync, limit = 60): ImmersionSessionR
r.total_sessions AS totalSessions,
r.total_active_min AS totalActiveMin,
r.total_lines_seen AS totalLinesSeen,
COALESCE(dwc.totalTokensSeen, r.total_tokens_seen) AS totalTokensSeen,
CASE
WHEN dwc.totalTokensSeen IS NOT NULL AND dwc.totalTokensSeen > r.total_tokens_seen THEN dwc.totalTokensSeen
ELSE r.total_tokens_seen
END AS totalTokensSeen,
r.total_cards AS totalCards,
r.cards_per_hour AS cardsPerHour,
CASE
WHEN r.total_active_min > 0 THEN COALESCE(dwc.totalTokensSeen, r.total_tokens_seen) * 1.0 / r.total_active_min
WHEN r.total_active_min > 0 THEN (
CASE
WHEN dwc.totalTokensSeen IS NOT NULL AND dwc.totalTokensSeen > r.total_tokens_seen THEN dwc.totalTokensSeen
ELSE r.total_tokens_seen
END
) * 1.0 / r.total_active_min
ELSE NULL
END AS tokensPerMin,
r.lookup_hit_rate AS lookupHitRate
@@ -432,14 +438,22 @@ export function getMonthlyRollups(db: DatabaseSync, limit = 24): ImmersionSessio
r.total_sessions AS totalSessions,
r.total_active_min AS totalActiveMin,
r.total_lines_seen AS totalLinesSeen,
COALESCE(mwc.totalTokensSeen, r.total_tokens_seen) AS totalTokensSeen,
CASE
WHEN mwc.totalTokensSeen IS NOT NULL AND mwc.totalTokensSeen > r.total_tokens_seen THEN mwc.totalTokensSeen
ELSE r.total_tokens_seen
END AS totalTokensSeen,
r.total_cards AS totalCards,
CASE
WHEN r.total_active_min > 0 THEN (r.total_cards * 60.0) / r.total_active_min
ELSE NULL
END AS cardsPerHour,
CASE
WHEN r.total_active_min > 0 THEN COALESCE(mwc.totalTokensSeen, r.total_tokens_seen) * 1.0 / r.total_active_min
WHEN r.total_active_min > 0 THEN (
CASE
WHEN mwc.totalTokensSeen IS NOT NULL AND mwc.totalTokensSeen > r.total_tokens_seen THEN mwc.totalTokensSeen
ELSE r.total_tokens_seen
END
) * 1.0 / r.total_active_min
ELSE NULL
END AS tokensPerMin,
NULL AS lookupHitRate
@@ -172,9 +172,7 @@ test('stats excluded words are replaced and read from sqlite storage', () => {
]);
replaceStatsExcludedWords(db, [{ headword: '犬', word: '犬', reading: 'いぬ' }]);
assert.deepEqual(getStatsExcludedWords(db), [
{ headword: '犬', word: '犬', reading: 'いぬ' },
]);
assert.deepEqual(getStatsExcludedWords(db), [{ headword: '犬', word: '犬', reading: 'いぬ' }]);
} finally {
db.close();
cleanupDbPath(dbPath);
+3 -3
View File
@@ -452,7 +452,7 @@ export function createStatsApp(
}
}
const sortedLineIndices = [...totalLineGroups.keys()].sort((a, b) => a - b);
const maxLineIndex = Math.max(...totalLineGroups.keys(), ...knownLineGroups.keys(), -1);
let knownWordsSeen = 0;
let totalWordsSeen = 0;
const knownByLinesSeen: Array<{
@@ -461,9 +461,9 @@ export function createStatsApp(
totalWordsSeen: number;
}> = [];
for (const lineIdx of sortedLineIndices) {
for (let lineIdx = 0; lineIdx <= maxLineIndex; lineIdx += 1) {
knownWordsSeen += knownLineGroups.get(lineIdx) ?? 0;
totalWordsSeen += totalLineGroups.get(lineIdx)!;
totalWordsSeen += totalLineGroups.get(lineIdx) ?? 0;
knownByLinesSeen.push({
linesSeen: lineIdx,
knownWordsSeen,