mirror of
https://github.com/ksyasuda/SubMiner.git
synced 2026-03-30 06:12:06 -07:00
Compare commits
10 Commits
v0.9.3
...
episode-br
| Author | SHA1 | Date | |
|---|---|---|---|
|
6ae3888b53
|
|||
|
6e041bc68e
|
|||
|
8db89c2239
|
|||
|
f9a4039ad2
|
|||
|
8e5c21b443
|
|||
|
55b350c3a2
|
|||
|
54324df3be
|
|||
| 35adf8299c | |||
|
2d4f2d1139
|
|||
|
77e632276b
|
20
.agents/plugins/marketplace.json
Normal file
20
.agents/plugins/marketplace.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"name": "subminer-local",
|
||||
"interface": {
|
||||
"displayName": "SubMiner Local"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "subminer-workflow",
|
||||
"source": {
|
||||
"source": "local",
|
||||
"path": "./plugins/subminer-workflow"
|
||||
},
|
||||
"policy": {
|
||||
"installation": "AVAILABLE",
|
||||
"authentication": "ON_INSTALL"
|
||||
},
|
||||
"category": "Productivity"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,127 +1,22 @@
|
||||
---
|
||||
name: "subminer-change-verification"
|
||||
description: "Use when working in the SubMiner repo and you need to verify code changes actually work. Covers targeted regression checks during debugging and pre-handoff verification, with cheap-first lane selection for config, docs, launcher/plugin, runtime-compat, and optional real-runtime escalation."
|
||||
name: 'subminer-change-verification'
|
||||
description: 'Compatibility shim. Canonical SubMiner change verification workflow now lives in the repo-local subminer-workflow plugin.'
|
||||
---
|
||||
|
||||
# SubMiner Change Verification
|
||||
# Compatibility Shim
|
||||
|
||||
Use this skill for SubMiner code changes. Default to cheap, repo-native verification first. Escalate only when the changed behavior actually depends on Electron, mpv, overlay/window tracking, or other GUI-sensitive runtime behavior.
|
||||
Canonical source:
|
||||
|
||||
## Scripts
|
||||
- `plugins/subminer-workflow/skills/subminer-change-verification/SKILL.md`
|
||||
|
||||
- `scripts/classify_subminer_diff.sh`
|
||||
- Emits suggested lanes and flags from explicit paths or current git changes.
|
||||
- `scripts/verify_subminer_change.sh`
|
||||
- Runs selected lanes, captures artifacts, and writes a compact summary.
|
||||
Canonical helper scripts:
|
||||
|
||||
If you need an explicit installed path, use the directory that contains this `SKILL.md`. The helper scripts live under:
|
||||
- `plugins/subminer-workflow/skills/subminer-change-verification/scripts/classify_subminer_diff.sh`
|
||||
- `plugins/subminer-workflow/skills/subminer-change-verification/scripts/verify_subminer_change.sh`
|
||||
|
||||
```bash
|
||||
export SUBMINER_VERIFY_SKILL="<path-to-skill>"
|
||||
```
|
||||
When this shim is invoked:
|
||||
|
||||
## Default workflow
|
||||
|
||||
1. Inspect the changed files or user-requested area.
|
||||
2. Run the classifier unless you already know the right lane.
|
||||
3. Run the verifier with the cheapest sufficient lane set.
|
||||
4. If the classifier emits `flag:real-runtime-candidate`, do not jump straight to runtime verification. First run the non-runtime lanes.
|
||||
5. Escalate to explicit `--lane real-runtime --allow-real-runtime` only when cheaper lanes cannot validate the behavior claim.
|
||||
6. Return:
|
||||
- verification summary
|
||||
- exact commands run
|
||||
- artifact paths
|
||||
- skipped lanes and blockers
|
||||
|
||||
## Quick start
|
||||
|
||||
Repo-source quick start:
|
||||
|
||||
```bash
|
||||
bash .agents/skills/subminer-change-verification/scripts/classify_subminer_diff.sh
|
||||
```
|
||||
|
||||
Installed-skill quick start:
|
||||
|
||||
```bash
|
||||
bash "$SUBMINER_VERIFY_SKILL/scripts/classify_subminer_diff.sh"
|
||||
```
|
||||
|
||||
Classify explicit files:
|
||||
|
||||
```bash
|
||||
bash .agents/skills/subminer-change-verification/scripts/classify_subminer_diff.sh \
|
||||
launcher/main.ts \
|
||||
plugin/subminer/lifecycle.lua \
|
||||
src/main/runtime/mpv-client-runtime-service.ts
|
||||
```
|
||||
|
||||
Run automatic lane selection:
|
||||
|
||||
```bash
|
||||
bash .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh
|
||||
```
|
||||
|
||||
Installed-skill form:
|
||||
|
||||
```bash
|
||||
bash "$SUBMINER_VERIFY_SKILL/scripts/verify_subminer_change.sh"
|
||||
```
|
||||
|
||||
Run targeted lanes:
|
||||
|
||||
```bash
|
||||
bash .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh \
|
||||
--lane launcher-plugin \
|
||||
--lane runtime-compat
|
||||
```
|
||||
|
||||
Dry-run to inspect planned commands and artifact layout:
|
||||
|
||||
```bash
|
||||
bash .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh \
|
||||
--dry-run \
|
||||
launcher/main.ts \
|
||||
src/main.ts
|
||||
```
|
||||
|
||||
## Lane guidance
|
||||
|
||||
- `docs`
|
||||
- For `docs-site/`, `docs/`, and doc-only edits.
|
||||
- `config`
|
||||
- For `src/config/` and config-template-sensitive edits.
|
||||
- `core`
|
||||
- For general source changes where `typecheck` + `test:fast` is the best cheap signal.
|
||||
- `launcher-plugin`
|
||||
- For `launcher/`, `plugin/subminer/`, plugin gating scripts, and wrapper/mpv routing work.
|
||||
- `runtime-compat`
|
||||
- For `src/main*`, runtime/composer wiring, mpv/overlay services, window trackers, and dist-sensitive behavior.
|
||||
- `real-runtime`
|
||||
- Only after deliberate escalation.
|
||||
|
||||
## Real Runtime Escalation
|
||||
|
||||
Escalate only when the change claim depends on actual runtime behavior, for example:
|
||||
|
||||
- overlay appears, hides, or tracks a real mpv window
|
||||
- mpv launch flags or pause-until-ready behavior
|
||||
- plugin/socket/auto-start handshake under a real player
|
||||
- macOS/window-tracker/focus-sensitive behavior
|
||||
|
||||
If the environment cannot support authoritative runtime verification, report the blocker explicitly. Do not silently downgrade a runtime-required claim to a pass.
|
||||
|
||||
## Artifact contract
|
||||
|
||||
The verifier writes under `.tmp/skill-verification/<timestamp>/`:
|
||||
|
||||
- `summary.json`
|
||||
- `summary.txt`
|
||||
- `classification.txt`
|
||||
- `env.txt`
|
||||
- `lanes.txt`
|
||||
- `steps.tsv`
|
||||
- `steps/*.stdout.log`
|
||||
- `steps/*.stderr.log`
|
||||
|
||||
On failure, quote the exact failing command and point at the artifact directory.
|
||||
1. Read the canonical plugin-owned skill.
|
||||
2. Follow the plugin-owned skill as the source of truth.
|
||||
3. Use the wrapper scripts in this shim directory only for compatibility with existing commands, docs, and backlog history.
|
||||
4. Do not duplicate workflow changes here; update the plugin-owned skill and scripts instead.
|
||||
|
||||
@@ -1,163 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
usage() {
|
||||
cat <<'EOF'
|
||||
Usage: classify_subminer_diff.sh [path ...]
|
||||
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
REPO_ROOT=$(cd "$SCRIPT_DIR/../../../.." && pwd)
|
||||
TARGET="$REPO_ROOT/plugins/subminer-workflow/skills/subminer-change-verification/scripts/classify_subminer_diff.sh"
|
||||
|
||||
Emit suggested verification lanes for explicit paths or current local git changes.
|
||||
|
||||
Output format:
|
||||
lane:<name>
|
||||
flag:<name>
|
||||
reason:<text>
|
||||
EOF
|
||||
}
|
||||
|
||||
has_item() {
|
||||
local needle=$1
|
||||
shift || true
|
||||
local item
|
||||
for item in "$@"; do
|
||||
if [[ "$item" == "$needle" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
add_lane() {
|
||||
local lane=$1
|
||||
if ! has_item "$lane" "${LANES[@]:-}"; then
|
||||
LANES+=("$lane")
|
||||
fi
|
||||
}
|
||||
|
||||
add_flag() {
|
||||
local flag=$1
|
||||
if ! has_item "$flag" "${FLAGS[@]:-}"; then
|
||||
FLAGS+=("$flag")
|
||||
fi
|
||||
}
|
||||
|
||||
add_reason() {
|
||||
REASONS+=("$1")
|
||||
}
|
||||
|
||||
collect_git_paths() {
|
||||
local top_level
|
||||
if ! top_level=$(git rev-parse --show-toplevel 2>/dev/null); then
|
||||
return 0
|
||||
fi
|
||||
|
||||
(
|
||||
cd "$top_level"
|
||||
if git rev-parse --verify HEAD >/dev/null 2>&1; then
|
||||
git diff --name-only --relative HEAD --
|
||||
git diff --name-only --relative --cached --
|
||||
else
|
||||
git diff --name-only --relative --
|
||||
git diff --name-only --relative --cached --
|
||||
fi
|
||||
git ls-files --others --exclude-standard
|
||||
) | awk 'NF' | sort -u
|
||||
}
|
||||
|
||||
if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then
|
||||
usage
|
||||
exit 0
|
||||
if [[ ! -x "$TARGET" ]]; then
|
||||
echo "Missing canonical script: $TARGET" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -a PATHS=()
|
||||
declare -a LANES=()
|
||||
declare -a FLAGS=()
|
||||
declare -a REASONS=()
|
||||
|
||||
if [[ $# -gt 0 ]]; then
|
||||
while [[ $# -gt 0 ]]; do
|
||||
PATHS+=("$1")
|
||||
shift
|
||||
done
|
||||
else
|
||||
while IFS= read -r line; do
|
||||
[[ -n "$line" ]] && PATHS+=("$line")
|
||||
done < <(collect_git_paths)
|
||||
fi
|
||||
|
||||
if [[ ${#PATHS[@]} -eq 0 ]]; then
|
||||
add_lane "core"
|
||||
add_reason "no changed paths detected -> default to core"
|
||||
fi
|
||||
|
||||
for path in "${PATHS[@]}"; do
|
||||
specialized=0
|
||||
|
||||
case "$path" in
|
||||
docs-site/*|docs/*|changes/*|README.md)
|
||||
add_lane "docs"
|
||||
add_reason "$path -> docs"
|
||||
specialized=1
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$path" in
|
||||
src/config/*|src/generate-config-example.ts|src/verify-config-example.ts|docs-site/public/config.example.jsonc|config.example.jsonc)
|
||||
add_lane "config"
|
||||
add_reason "$path -> config"
|
||||
specialized=1
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$path" in
|
||||
launcher/*|plugin/subminer/*|plugin/subminer.conf|scripts/test-plugin-*|scripts/get-mpv-window-*|scripts/configure-plugin-binary-path.mjs)
|
||||
add_lane "launcher-plugin"
|
||||
add_reason "$path -> launcher-plugin"
|
||||
add_flag "real-runtime-candidate"
|
||||
add_reason "$path -> real-runtime-candidate"
|
||||
specialized=1
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$path" in
|
||||
src/main.ts|src/main-entry.ts|src/preload.ts|src/main/*|src/core/services/mpv*|src/core/services/overlay*|src/renderer/*|src/window-trackers/*|scripts/prepare-build-assets.mjs)
|
||||
add_lane "runtime-compat"
|
||||
add_reason "$path -> runtime-compat"
|
||||
add_flag "real-runtime-candidate"
|
||||
add_reason "$path -> real-runtime-candidate"
|
||||
specialized=1
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ "$specialized" == "0" ]]; then
|
||||
case "$path" in
|
||||
src/*|package.json|tsconfig*.json|scripts/*|Makefile)
|
||||
add_lane "core"
|
||||
add_reason "$path -> core"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
case "$path" in
|
||||
package.json|src/main.ts|src/main-entry.ts|src/preload.ts)
|
||||
add_flag "broad-impact"
|
||||
add_reason "$path -> broad-impact"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ${#LANES[@]} -eq 0 ]]; then
|
||||
add_lane "core"
|
||||
add_reason "no lane-specific matches -> default to core"
|
||||
fi
|
||||
|
||||
for lane in "${LANES[@]}"; do
|
||||
printf 'lane:%s\n' "$lane"
|
||||
done
|
||||
|
||||
for flag in "${FLAGS[@]}"; do
|
||||
printf 'flag:%s\n' "$flag"
|
||||
done
|
||||
|
||||
for reason in "${REASONS[@]}"; do
|
||||
printf 'reason:%s\n' "$reason"
|
||||
done
|
||||
exec "$TARGET" "$@"
|
||||
|
||||
@@ -1,566 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
usage() {
|
||||
cat <<'EOF'
|
||||
Usage: verify_subminer_change.sh [options] [path ...]
|
||||
|
||||
Options:
|
||||
--lane <name> Force a verification lane. Repeatable.
|
||||
--artifact-dir <dir> Use an explicit artifact directory.
|
||||
--allow-real-runtime Allow explicit real-runtime execution.
|
||||
--allow-real-gui Deprecated alias for --allow-real-runtime.
|
||||
--dry-run Record planned steps without executing commands.
|
||||
--help Show this help text.
|
||||
|
||||
If no lanes are supplied, the script classifies the provided paths. If no paths are
|
||||
provided, it classifies the current local git changes.
|
||||
|
||||
Authoritative real-runtime verification should be requested with explicit path
|
||||
arguments instead of relying on inferred local git changes.
|
||||
EOF
|
||||
}
|
||||
|
||||
timestamp() {
|
||||
date +%Y%m%d-%H%M%S
|
||||
}
|
||||
|
||||
timestamp_iso() {
|
||||
date -u +%Y-%m-%dT%H:%M:%SZ
|
||||
}
|
||||
|
||||
generate_session_id() {
|
||||
local tmp_dir
|
||||
tmp_dir=$(mktemp -d "${TMPDIR:-/tmp}/subminer-verify-$(timestamp)-XXXXXX")
|
||||
basename "$tmp_dir"
|
||||
rmdir "$tmp_dir"
|
||||
}
|
||||
|
||||
has_item() {
|
||||
local needle=$1
|
||||
shift || true
|
||||
local item
|
||||
for item in "$@"; do
|
||||
if [[ "$item" == "$needle" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
normalize_lane_name() {
|
||||
case "$1" in
|
||||
real-gui)
|
||||
printf '%s' "real-runtime"
|
||||
;;
|
||||
*)
|
||||
printf '%s' "$1"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
add_lane() {
|
||||
local lane
|
||||
lane=$(normalize_lane_name "$1")
|
||||
if ! has_item "$lane" "${SELECTED_LANES[@]:-}"; then
|
||||
SELECTED_LANES+=("$lane")
|
||||
fi
|
||||
}
|
||||
|
||||
add_blocker() {
|
||||
BLOCKERS+=("$1")
|
||||
BLOCKED=1
|
||||
}
|
||||
|
||||
append_step_record() {
|
||||
printf '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' \
|
||||
"$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" >>"$STEPS_TSV"
|
||||
}
|
||||
|
||||
record_env() {
|
||||
{
|
||||
printf 'repo_root=%s\n' "$REPO_ROOT"
|
||||
printf 'session_id=%s\n' "$SESSION_ID"
|
||||
printf 'artifact_dir=%s\n' "$ARTIFACT_DIR"
|
||||
printf 'path_selection_mode=%s\n' "$PATH_SELECTION_MODE"
|
||||
printf 'dry_run=%s\n' "$DRY_RUN"
|
||||
printf 'allow_real_runtime=%s\n' "$ALLOW_REAL_RUNTIME"
|
||||
printf 'session_home=%s\n' "$SESSION_HOME"
|
||||
printf 'session_xdg_config_home=%s\n' "$SESSION_XDG_CONFIG_HOME"
|
||||
printf 'session_mpv_dir=%s\n' "$SESSION_MPV_DIR"
|
||||
printf 'session_logs_dir=%s\n' "$SESSION_LOGS_DIR"
|
||||
printf 'session_mpv_log=%s\n' "$SESSION_MPV_LOG"
|
||||
printf 'pwd=%s\n' "$(pwd)"
|
||||
git rev-parse --short HEAD 2>/dev/null | sed 's/^/git_head=/' || true
|
||||
git status --short 2>/dev/null || true
|
||||
if [[ ${#PATH_ARGS[@]} -gt 0 ]]; then
|
||||
printf 'requested_paths=\n'
|
||||
printf ' %s\n' "${PATH_ARGS[@]}"
|
||||
fi
|
||||
} >"$ARTIFACT_DIR/env.txt"
|
||||
}
|
||||
|
||||
run_step() {
|
||||
local lane=$1
|
||||
local name=$2
|
||||
local command=$3
|
||||
local note=${4:-}
|
||||
local slug=${name//[^a-zA-Z0-9_-]/-}
|
||||
local stdout_rel="steps/${slug}.stdout.log"
|
||||
local stderr_rel="steps/${slug}.stderr.log"
|
||||
local stdout_path="$ARTIFACT_DIR/$stdout_rel"
|
||||
local stderr_path="$ARTIFACT_DIR/$stderr_rel"
|
||||
local status exit_code
|
||||
|
||||
COMMANDS_RUN+=("$command")
|
||||
printf '%s\n' "$command" >"$ARTIFACT_DIR/steps/${slug}.command.txt"
|
||||
|
||||
if [[ "$DRY_RUN" == "1" ]]; then
|
||||
printf '[dry-run] %s\n' "$command" >"$stdout_path"
|
||||
: >"$stderr_path"
|
||||
status="dry-run"
|
||||
exit_code=0
|
||||
else
|
||||
if bash -lc "cd \"$REPO_ROOT\" && $command" >"$stdout_path" 2>"$stderr_path"; then
|
||||
status="passed"
|
||||
exit_code=0
|
||||
EXECUTED_REAL_STEPS=1
|
||||
else
|
||||
exit_code=$?
|
||||
status="failed"
|
||||
FAILED=1
|
||||
fi
|
||||
fi
|
||||
|
||||
append_step_record "$lane" "$name" "$status" "$exit_code" "$command" "$stdout_rel" "$stderr_rel" "$note"
|
||||
printf '%s\t%s\t%s\n' "$lane" "$name" "$status"
|
||||
|
||||
if [[ "$status" == "failed" ]]; then
|
||||
FAILURE_STEP="$name"
|
||||
FAILURE_COMMAND="$command"
|
||||
FAILURE_STDOUT="$stdout_rel"
|
||||
FAILURE_STDERR="$stderr_rel"
|
||||
return "$exit_code"
|
||||
fi
|
||||
}
|
||||
|
||||
record_nonpassing_step() {
|
||||
local lane=$1
|
||||
local name=$2
|
||||
local status=$3
|
||||
local note=$4
|
||||
local slug=${name//[^a-zA-Z0-9_-]/-}
|
||||
local stdout_rel="steps/${slug}.stdout.log"
|
||||
local stderr_rel="steps/${slug}.stderr.log"
|
||||
printf '%s\n' "$note" >"$ARTIFACT_DIR/$stdout_rel"
|
||||
: >"$ARTIFACT_DIR/$stderr_rel"
|
||||
append_step_record "$lane" "$name" "$status" "0" "" "$stdout_rel" "$stderr_rel" "$note"
|
||||
printf '%s\t%s\t%s\n' "$lane" "$name" "$status"
|
||||
}
|
||||
|
||||
record_skipped_step() {
|
||||
record_nonpassing_step "$1" "$2" "skipped" "$3"
|
||||
}
|
||||
|
||||
record_blocked_step() {
|
||||
add_blocker "$3"
|
||||
record_nonpassing_step "$1" "$2" "blocked" "$3"
|
||||
}
|
||||
|
||||
record_failed_step() {
|
||||
FAILED=1
|
||||
FAILURE_STEP=$2
|
||||
FAILURE_COMMAND=${FAILURE_COMMAND:-"(validation)"}
|
||||
FAILURE_STDOUT="steps/${2//[^a-zA-Z0-9_-]/-}.stdout.log"
|
||||
FAILURE_STDERR="steps/${2//[^a-zA-Z0-9_-]/-}.stderr.log"
|
||||
add_blocker "$3"
|
||||
record_nonpassing_step "$1" "$2" "failed" "$3"
|
||||
}
|
||||
|
||||
find_real_runtime_helper() {
|
||||
local candidate
|
||||
for candidate in \
|
||||
"$SCRIPT_DIR/run_real_runtime_smoke.sh" \
|
||||
"$SCRIPT_DIR/run_real_mpv_smoke.sh"; do
|
||||
if [[ -x "$candidate" ]]; then
|
||||
printf '%s' "$candidate"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
acquire_real_runtime_lease() {
|
||||
local lease_root="$REPO_ROOT/.tmp/skill-verification/locks"
|
||||
local lease_dir="$lease_root/exclusive-real-runtime"
|
||||
mkdir -p "$lease_root"
|
||||
if mkdir "$lease_dir" 2>/dev/null; then
|
||||
REAL_RUNTIME_LEASE_DIR="$lease_dir"
|
||||
printf '%s\n' "$SESSION_ID" >"$lease_dir/session_id"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local owner=""
|
||||
if [[ -f "$lease_dir/session_id" ]]; then
|
||||
owner=$(cat "$lease_dir/session_id")
|
||||
fi
|
||||
add_blocker "real-runtime lease already held${owner:+ by $owner}"
|
||||
return 1
|
||||
}
|
||||
|
||||
release_real_runtime_lease() {
|
||||
if [[ -n "$REAL_RUNTIME_LEASE_DIR" && -d "$REAL_RUNTIME_LEASE_DIR" ]]; then
|
||||
if [[ -f "$REAL_RUNTIME_LEASE_DIR/session_id" ]]; then
|
||||
local owner
|
||||
owner=$(cat "$REAL_RUNTIME_LEASE_DIR/session_id")
|
||||
if [[ "$owner" != "$SESSION_ID" ]]; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
rm -rf "$REAL_RUNTIME_LEASE_DIR"
|
||||
fi
|
||||
}
|
||||
|
||||
compute_final_status() {
|
||||
if [[ "$FAILED" == "1" ]]; then
|
||||
FINAL_STATUS="failed"
|
||||
elif [[ "$BLOCKED" == "1" ]]; then
|
||||
FINAL_STATUS="blocked"
|
||||
elif [[ "$EXECUTED_REAL_STEPS" == "1" ]]; then
|
||||
FINAL_STATUS="passed"
|
||||
else
|
||||
FINAL_STATUS="skipped"
|
||||
fi
|
||||
}
|
||||
|
||||
write_summary_files() {
|
||||
local lane_lines
|
||||
lane_lines=$(printf '%s\n' "${SELECTED_LANES[@]}")
|
||||
printf '%s\n' "$lane_lines" >"$ARTIFACT_DIR/lanes.txt"
|
||||
printf '%s\n' "${BLOCKERS[@]}" >"$ARTIFACT_DIR/blockers.txt"
|
||||
printf '%s\n' "${PATH_ARGS[@]}" >"$ARTIFACT_DIR/requested-paths.txt"
|
||||
|
||||
ARTIFACT_DIR_ENV="$ARTIFACT_DIR" \
|
||||
SESSION_ID_ENV="$SESSION_ID" \
|
||||
FINAL_STATUS_ENV="$FINAL_STATUS" \
|
||||
PATH_SELECTION_MODE_ENV="$PATH_SELECTION_MODE" \
|
||||
ALLOW_REAL_RUNTIME_ENV="$ALLOW_REAL_RUNTIME" \
|
||||
SESSION_HOME_ENV="$SESSION_HOME" \
|
||||
SESSION_XDG_CONFIG_HOME_ENV="$SESSION_XDG_CONFIG_HOME" \
|
||||
SESSION_MPV_DIR_ENV="$SESSION_MPV_DIR" \
|
||||
SESSION_LOGS_DIR_ENV="$SESSION_LOGS_DIR" \
|
||||
SESSION_MPV_LOG_ENV="$SESSION_MPV_LOG" \
|
||||
STARTED_AT_ENV="$STARTED_AT" \
|
||||
FINISHED_AT_ENV="$FINISHED_AT" \
|
||||
FAILED_ENV="$FAILED" \
|
||||
FAILURE_COMMAND_ENV="${FAILURE_COMMAND:-}" \
|
||||
FAILURE_STDOUT_ENV="${FAILURE_STDOUT:-}" \
|
||||
FAILURE_STDERR_ENV="${FAILURE_STDERR:-}" \
|
||||
bun -e '
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
|
||||
function readLines(filePath) {
|
||||
if (!fs.existsSync(filePath)) return [];
|
||||
return fs.readFileSync(filePath, "utf8").split(/\r?\n/).filter(Boolean);
|
||||
}
|
||||
|
||||
const artifactDir = process.env.ARTIFACT_DIR_ENV;
|
||||
const reportsDir = path.join(artifactDir, "reports");
|
||||
const lanes = readLines(path.join(artifactDir, "lanes.txt"));
|
||||
const blockers = readLines(path.join(artifactDir, "blockers.txt"));
|
||||
const requestedPaths = readLines(path.join(artifactDir, "requested-paths.txt"));
|
||||
const steps = readLines(path.join(artifactDir, "steps.tsv")).map((line) => {
|
||||
const [lane, name, status, exitCode, command, stdout, stderr, note] = line.split("\t");
|
||||
return {
|
||||
lane,
|
||||
name,
|
||||
status,
|
||||
exitCode: Number(exitCode || 0),
|
||||
command,
|
||||
stdout,
|
||||
stderr,
|
||||
note,
|
||||
};
|
||||
});
|
||||
const summary = {
|
||||
sessionId: process.env.SESSION_ID_ENV || "",
|
||||
artifactDir,
|
||||
reportsDir,
|
||||
status: process.env.FINAL_STATUS_ENV || "failed",
|
||||
selectedLanes: lanes,
|
||||
failed: process.env.FAILED_ENV === "1",
|
||||
failure:
|
||||
process.env.FAILED_ENV === "1"
|
||||
? {
|
||||
command: process.env.FAILURE_COMMAND_ENV || "",
|
||||
stdout: process.env.FAILURE_STDOUT_ENV || "",
|
||||
stderr: process.env.FAILURE_STDERR_ENV || "",
|
||||
}
|
||||
: null,
|
||||
blockers,
|
||||
pathSelectionMode: process.env.PATH_SELECTION_MODE_ENV || "git-inferred",
|
||||
requestedPaths,
|
||||
allowRealRuntime: process.env.ALLOW_REAL_RUNTIME_ENV === "1",
|
||||
startedAt: process.env.STARTED_AT_ENV || "",
|
||||
finishedAt: process.env.FINISHED_AT_ENV || "",
|
||||
env: {
|
||||
home: process.env.SESSION_HOME_ENV || "",
|
||||
xdgConfigHome: process.env.SESSION_XDG_CONFIG_HOME_ENV || "",
|
||||
mpvDir: process.env.SESSION_MPV_DIR_ENV || "",
|
||||
logsDir: process.env.SESSION_LOGS_DIR_ENV || "",
|
||||
mpvLog: process.env.SESSION_MPV_LOG_ENV || "",
|
||||
},
|
||||
steps,
|
||||
};
|
||||
|
||||
const summaryJson = JSON.stringify(summary, null, 2) + "\n";
|
||||
fs.writeFileSync(path.join(artifactDir, "summary.json"), summaryJson);
|
||||
fs.writeFileSync(path.join(reportsDir, "summary.json"), summaryJson);
|
||||
|
||||
const lines = [];
|
||||
lines.push(`session_id: ${summary.sessionId}`);
|
||||
lines.push(`artifact_dir: ${artifactDir}`);
|
||||
lines.push(`selected_lanes: ${lanes.join(", ") || "(none)"}`);
|
||||
lines.push(`status: ${summary.status}`);
|
||||
lines.push(`path_selection_mode: ${summary.pathSelectionMode}`);
|
||||
if (requestedPaths.length > 0) {
|
||||
lines.push(`requested_paths: ${requestedPaths.join(", ")}`);
|
||||
}
|
||||
if (blockers.length > 0) {
|
||||
lines.push(`blockers: ${blockers.join(" | ")}`);
|
||||
}
|
||||
for (const step of steps) {
|
||||
lines.push(`${step.lane}/${step.name}: ${step.status}`);
|
||||
if (step.command) lines.push(` command: ${step.command}`);
|
||||
lines.push(` stdout: ${step.stdout}`);
|
||||
lines.push(` stderr: ${step.stderr}`);
|
||||
if (step.note) lines.push(` note: ${step.note}`);
|
||||
}
|
||||
if (summary.failed) {
|
||||
lines.push(`failure_command: ${process.env.FAILURE_COMMAND_ENV || ""}`);
|
||||
}
|
||||
const summaryText = lines.join("\n") + "\n";
|
||||
fs.writeFileSync(path.join(artifactDir, "summary.txt"), summaryText);
|
||||
fs.writeFileSync(path.join(reportsDir, "summary.txt"), summaryText);
|
||||
'
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
release_real_runtime_lease
|
||||
}
|
||||
|
||||
CLASSIFIER_OUTPUT=""
|
||||
ARTIFACT_DIR=""
|
||||
ALLOW_REAL_RUNTIME=0
|
||||
DRY_RUN=0
|
||||
FAILED=0
|
||||
BLOCKED=0
|
||||
EXECUTED_REAL_STEPS=0
|
||||
FINAL_STATUS=""
|
||||
FAILURE_STEP=""
|
||||
FAILURE_COMMAND=""
|
||||
FAILURE_STDOUT=""
|
||||
FAILURE_STDERR=""
|
||||
REAL_RUNTIME_LEASE_DIR=""
|
||||
STARTED_AT=""
|
||||
FINISHED_AT=""
|
||||
|
||||
declare -a EXPLICIT_LANES=()
|
||||
declare -a SELECTED_LANES=()
|
||||
declare -a PATH_ARGS=()
|
||||
declare -a COMMANDS_RUN=()
|
||||
declare -a BLOCKERS=()
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--lane)
|
||||
EXPLICIT_LANES+=("$(normalize_lane_name "$2")")
|
||||
shift 2
|
||||
;;
|
||||
--artifact-dir)
|
||||
ARTIFACT_DIR=$2
|
||||
shift 2
|
||||
;;
|
||||
--allow-real-runtime|--allow-real-gui)
|
||||
ALLOW_REAL_RUNTIME=1
|
||||
shift
|
||||
;;
|
||||
--dry-run)
|
||||
DRY_RUN=1
|
||||
shift
|
||||
;;
|
||||
--help|-h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
while [[ $# -gt 0 ]]; do
|
||||
PATH_ARGS+=("$1")
|
||||
shift
|
||||
done
|
||||
;;
|
||||
*)
|
||||
PATH_ARGS+=("$1")
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel 2>/dev/null || pwd)
|
||||
SESSION_ID=$(generate_session_id)
|
||||
PATH_SELECTION_MODE="git-inferred"
|
||||
if [[ ${#PATH_ARGS[@]} -gt 0 ]]; then
|
||||
PATH_SELECTION_MODE="explicit"
|
||||
REPO_ROOT=$(cd "$SCRIPT_DIR/../../../.." && pwd)
|
||||
TARGET="$REPO_ROOT/plugins/subminer-workflow/skills/subminer-change-verification/scripts/verify_subminer_change.sh"
|
||||
|
||||
if [[ ! -x "$TARGET" ]]; then
|
||||
echo "Missing canonical script: $TARGET" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "$ARTIFACT_DIR" ]]; then
|
||||
mkdir -p "$REPO_ROOT/.tmp/skill-verification"
|
||||
ARTIFACT_DIR="$REPO_ROOT/.tmp/skill-verification/$SESSION_ID"
|
||||
fi
|
||||
|
||||
SESSION_HOME="$ARTIFACT_DIR/home"
|
||||
SESSION_XDG_CONFIG_HOME="$ARTIFACT_DIR/xdg"
|
||||
SESSION_MPV_DIR="$ARTIFACT_DIR/mpv"
|
||||
SESSION_LOGS_DIR="$ARTIFACT_DIR/logs"
|
||||
SESSION_MPV_LOG="$SESSION_LOGS_DIR/mpv.log"
|
||||
|
||||
mkdir -p "$ARTIFACT_DIR/steps" "$ARTIFACT_DIR/reports" "$SESSION_HOME" "$SESSION_XDG_CONFIG_HOME" "$SESSION_MPV_DIR" "$SESSION_LOGS_DIR"
|
||||
STEPS_TSV="$ARTIFACT_DIR/steps.tsv"
|
||||
: >"$STEPS_TSV"
|
||||
|
||||
trap cleanup EXIT
|
||||
STARTED_AT=$(timestamp_iso)
|
||||
|
||||
if [[ ${#EXPLICIT_LANES[@]} -gt 0 ]]; then
|
||||
local_lane=""
|
||||
for local_lane in "${EXPLICIT_LANES[@]}"; do
|
||||
add_lane "$local_lane"
|
||||
done
|
||||
printf 'reason:explicit lanes supplied\n' >"$ARTIFACT_DIR/classification.txt"
|
||||
else
|
||||
if [[ ${#PATH_ARGS[@]} -gt 0 ]]; then
|
||||
CLASSIFIER_OUTPUT=$(bash "$SCRIPT_DIR/classify_subminer_diff.sh" "${PATH_ARGS[@]}")
|
||||
else
|
||||
CLASSIFIER_OUTPUT=$(bash "$SCRIPT_DIR/classify_subminer_diff.sh")
|
||||
fi
|
||||
printf '%s\n' "$CLASSIFIER_OUTPUT" >"$ARTIFACT_DIR/classification.txt"
|
||||
while IFS= read -r line; do
|
||||
case "$line" in
|
||||
lane:*)
|
||||
add_lane "${line#lane:}"
|
||||
;;
|
||||
esac
|
||||
done <<<"$CLASSIFIER_OUTPUT"
|
||||
fi
|
||||
|
||||
record_env
|
||||
|
||||
printf 'artifact_dir=%s\n' "$ARTIFACT_DIR"
|
||||
printf 'selected_lanes=%s\n' "$(IFS=,; echo "${SELECTED_LANES[*]}")"
|
||||
|
||||
for lane in "${SELECTED_LANES[@]}"; do
|
||||
case "$lane" in
|
||||
docs)
|
||||
run_step "$lane" "docs-test" "bun run docs:test" || break
|
||||
[[ "$FAILED" == "1" ]] && break
|
||||
run_step "$lane" "docs-build" "bun run docs:build" || break
|
||||
;;
|
||||
config)
|
||||
run_step "$lane" "test-config" "bun run test:config" || break
|
||||
;;
|
||||
core)
|
||||
run_step "$lane" "typecheck" "bun run typecheck" || break
|
||||
[[ "$FAILED" == "1" ]] && break
|
||||
run_step "$lane" "test-fast" "bun run test:fast" || break
|
||||
;;
|
||||
launcher-plugin)
|
||||
run_step "$lane" "launcher-smoke-src" "bun run test:launcher:smoke:src" || break
|
||||
[[ "$FAILED" == "1" ]] && break
|
||||
run_step "$lane" "plugin-src" "bun run test:plugin:src" || break
|
||||
;;
|
||||
runtime-compat)
|
||||
run_step "$lane" "build" "bun run build" || break
|
||||
[[ "$FAILED" == "1" ]] && break
|
||||
run_step "$lane" "test-runtime-compat" "bun run test:runtime:compat" || break
|
||||
[[ "$FAILED" == "1" ]] && break
|
||||
run_step "$lane" "test-smoke-dist" "bun run test:smoke:dist" || break
|
||||
;;
|
||||
real-runtime)
|
||||
if [[ "$PATH_SELECTION_MODE" != "explicit" ]]; then
|
||||
record_blocked_step \
|
||||
"$lane" \
|
||||
"real-runtime-guard" \
|
||||
"real-runtime lane requires explicit paths; inferred local git changes are non-authoritative"
|
||||
break
|
||||
fi
|
||||
|
||||
if [[ "$ALLOW_REAL_RUNTIME" != "1" ]]; then
|
||||
record_blocked_step \
|
||||
"$lane" \
|
||||
"real-runtime-guard" \
|
||||
"real-runtime lane requested but --allow-real-runtime was not supplied"
|
||||
break
|
||||
fi
|
||||
|
||||
if ! acquire_real_runtime_lease; then
|
||||
record_blocked_step \
|
||||
"$lane" \
|
||||
"real-runtime-lease" \
|
||||
"real-runtime lease already held; rerun after the active runtime verification finishes"
|
||||
break
|
||||
fi
|
||||
|
||||
if ! REAL_RUNTIME_HELPER=$(find_real_runtime_helper); then
|
||||
record_blocked_step \
|
||||
"$lane" \
|
||||
"real-runtime-helper" \
|
||||
"real-runtime helper not implemented yet"
|
||||
break
|
||||
fi
|
||||
|
||||
printf -v REAL_RUNTIME_COMMAND \
|
||||
'SESSION_ID=%q HOME=%q XDG_CONFIG_HOME=%q SUBMINER_MPV_LOG=%q bash %q' \
|
||||
"$SESSION_ID" \
|
||||
"$SESSION_HOME" \
|
||||
"$SESSION_XDG_CONFIG_HOME" \
|
||||
"$SESSION_MPV_LOG" \
|
||||
"$REAL_RUNTIME_HELPER"
|
||||
|
||||
run_step "$lane" "real-runtime-smoke" "$REAL_RUNTIME_COMMAND" || break
|
||||
;;
|
||||
*)
|
||||
record_failed_step "$lane" "lane-validation" "unknown lane: $lane"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ "$FAILED" == "1" || "$BLOCKED" == "1" ]]; then
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
FINISHED_AT=$(timestamp_iso)
|
||||
compute_final_status
|
||||
write_summary_files
|
||||
|
||||
printf 'status=%s\n' "$FINAL_STATUS"
|
||||
printf 'artifact_dir=%s\n' "$ARTIFACT_DIR"
|
||||
|
||||
case "$FINAL_STATUS" in
|
||||
failed)
|
||||
printf 'result=failed\n'
|
||||
printf 'failure_command=%s\n' "$FAILURE_COMMAND"
|
||||
exit 1
|
||||
;;
|
||||
blocked)
|
||||
printf 'result=blocked\n'
|
||||
exit 2
|
||||
;;
|
||||
*)
|
||||
printf 'result=ok\n'
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
exec "$TARGET" "$@"
|
||||
|
||||
@@ -1,146 +1,18 @@
|
||||
---
|
||||
name: "subminer-scrum-master"
|
||||
description: "Use in the SubMiner repo when a request should be turned into planned work and driven through execution. Assesses whether backlog tracking is warranted, creates or updates tasks when needed, records a plan, dispatches one or more subagents, and requires verification before handoff."
|
||||
name: 'subminer-scrum-master'
|
||||
description: 'Compatibility shim. Canonical SubMiner scrum-master workflow now lives in the repo-local subminer-workflow plugin.'
|
||||
---
|
||||
|
||||
# SubMiner Scrum Master
|
||||
# Compatibility Shim
|
||||
|
||||
Own workflow, not code by default.
|
||||
Canonical source:
|
||||
|
||||
Use this skill when the user gives a feature request, bug report, issue, refactor, or implementation ask and the agent should manage intake, planning, backlog hygiene, worker dispatch, and verification through completion.
|
||||
- `plugins/subminer-workflow/skills/subminer-scrum-master/SKILL.md`
|
||||
|
||||
## Core Rules
|
||||
When this shim is invoked:
|
||||
|
||||
1. Decide first whether backlog tracking is warranted.
|
||||
2. If backlog is needed, search first. Update existing work when it clearly matches.
|
||||
3. If backlog is not needed, keep the process light. Do not invent ticket ceremony.
|
||||
4. Record a plan before dispatching coding work.
|
||||
5. Use parent + subtasks for multi-part work when backlog is used.
|
||||
6. Dispatch conservatively. Parallelize only disjoint write scopes.
|
||||
7. Require verification before handoff, typically via `subminer-change-verification`.
|
||||
8. Report backlog actions, dispatched workers, verification, blockers, and remaining risks.
|
||||
1. Read the canonical plugin-owned skill.
|
||||
2. Follow the plugin-owned skill as the source of truth.
|
||||
3. Do not duplicate workflow changes here; update the plugin-owned skill instead.
|
||||
|
||||
## Backlog Decision
|
||||
|
||||
Skip backlog when the request is:
|
||||
- question only
|
||||
- obvious mechanical edit
|
||||
- tiny isolated change with no real planning
|
||||
|
||||
Use backlog when the work:
|
||||
- needs planning or scope decisions
|
||||
- spans multiple phases or subsystems
|
||||
- is likely to need subagent dispatch
|
||||
- should remain traceable for handoff/resume
|
||||
|
||||
If backlog is used:
|
||||
- search existing tasks first
|
||||
- create/update a standalone task for one focused deliverable
|
||||
- create/update a parent task plus subtasks for multi-part work
|
||||
- record the implementation plan in the task before implementation begins
|
||||
|
||||
## Intake Workflow
|
||||
|
||||
1. Parse the request.
|
||||
Classify it as question, mechanical edit, bugfix, feature, refactor, investigation, or follow-up.
|
||||
2. Decide whether backlog is needed.
|
||||
3. If backlog is needed:
|
||||
- search first
|
||||
- update existing task if clearly relevant
|
||||
- otherwise create the right structure
|
||||
- write the implementation plan before dispatch
|
||||
4. If backlog is skipped:
|
||||
- write a short working plan in-thread
|
||||
- proceed without fake ticketing
|
||||
5. Choose execution mode:
|
||||
- no subagents for trivial work
|
||||
- one worker for focused work
|
||||
- parallel workers only for disjoint scopes
|
||||
6. Run verification before handoff.
|
||||
|
||||
## Dispatch Rules
|
||||
|
||||
The scrum master orchestrates. Workers implement.
|
||||
|
||||
- Do not become the default implementer unless delegation is unnecessary.
|
||||
- Do not parallelize overlapping files or tightly coupled runtime work.
|
||||
- Give every worker explicit ownership of files/modules.
|
||||
- Tell every worker other agents may be active and they must not revert unrelated edits.
|
||||
- Require each worker to report:
|
||||
- changed files
|
||||
- tests run
|
||||
- blockers
|
||||
|
||||
Use worker agents for implementation and explorer agents only for bounded codebase questions.
|
||||
|
||||
## Verification
|
||||
|
||||
Every nontrivial code task gets verification.
|
||||
|
||||
Preferred flow:
|
||||
1. use `subminer-change-verification`
|
||||
2. start with the cheapest sufficient lane
|
||||
3. escalate only when needed
|
||||
4. if worker verification is sufficient, accept it or run one final consolidating pass
|
||||
|
||||
Never hand off nontrivial work without stating what was verified and what was skipped.
|
||||
|
||||
## Pre-Handoff Policy Checks (Required)
|
||||
|
||||
Before handoff, always ask and answer both of these questions explicitly:
|
||||
|
||||
1. **Docs update required?**
|
||||
2. **Changelog fragment required?**
|
||||
|
||||
Rules:
|
||||
- Do not assume silence implies "no." Record an explicit yes/no decision for each item.
|
||||
- If the answer is yes, either complete the update or report the blocker before handoff.
|
||||
- Include the final answers in the handoff summary even when both answers are "no."
|
||||
|
||||
## Failure / Scope Handling
|
||||
|
||||
- If a worker hits ambiguity, pause and ask the user.
|
||||
- If verification fails, either:
|
||||
- send the worker back with exact failure context, or
|
||||
- fix it directly if it is tiny and clearly in scope
|
||||
- If new scope appears, revisit backlog structure before silently expanding work.
|
||||
|
||||
## Representative Flows
|
||||
|
||||
### Trivial no-ticket work
|
||||
|
||||
- decide backlog is unnecessary
|
||||
- keep a short plan
|
||||
- implement directly or with one worker if helpful
|
||||
- run targeted verification
|
||||
- report outcome concisely
|
||||
|
||||
### Single-task implementation
|
||||
|
||||
- search/create/update one task
|
||||
- record plan
|
||||
- dispatch one worker
|
||||
- integrate
|
||||
- verify
|
||||
- update task and report outcome
|
||||
|
||||
### Parent + subtasks execution
|
||||
|
||||
- search/create/update parent task
|
||||
- create subtasks for distinct deliverables/phases
|
||||
- record sequencing in the plan
|
||||
- dispatch workers only where scopes are disjoint
|
||||
- integrate
|
||||
- run consolidated verification
|
||||
- update task state and report outcome
|
||||
|
||||
## Output Expectations
|
||||
|
||||
At the end, report:
|
||||
- whether backlog was used and what changed
|
||||
- which workers were dispatched and what they owned
|
||||
- what verification ran
|
||||
- explicit answers to:
|
||||
- docs update required?
|
||||
- changelog fragment required?
|
||||
- blockers, skips, and risks
|
||||
This shim exists so existing repo references and prompts keep resolving during the migration to the repo-local plugin workflow.
|
||||
|
||||
10
.github/workflows/ci.yml
vendored
10
.github/workflows/ci.yml
vendored
@@ -61,6 +61,16 @@ jobs:
|
||||
- name: Test suite (source)
|
||||
run: bun run test:fast
|
||||
|
||||
- name: Coverage suite (maintained source lane)
|
||||
run: bun run test:coverage:src
|
||||
|
||||
- name: Upload coverage artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-test-src
|
||||
path: coverage/test-src/lcov.info
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Launcher smoke suite (source)
|
||||
run: bun run test:launcher:smoke:src
|
||||
|
||||
|
||||
75
.github/workflows/release.yml
vendored
75
.github/workflows/release.yml
vendored
@@ -49,6 +49,16 @@ jobs:
|
||||
- name: Test suite (source)
|
||||
run: bun run test:fast
|
||||
|
||||
- name: Coverage suite (maintained source lane)
|
||||
run: bun run test:coverage:src
|
||||
|
||||
- name: Upload coverage artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-test-src
|
||||
path: coverage/test-src/lcov.info
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Launcher smoke suite (source)
|
||||
run: bun run test:launcher:smoke:src
|
||||
|
||||
@@ -399,33 +409,64 @@ jobs:
|
||||
id: version
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Validate AUR SSH secret
|
||||
- name: Check AUR publish prerequisites
|
||||
id: aur_prereqs
|
||||
env:
|
||||
AUR_SSH_PRIVATE_KEY: ${{ secrets.AUR_SSH_PRIVATE_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [ -z "${AUR_SSH_PRIVATE_KEY}" ]; then
|
||||
echo "Missing required secret: AUR_SSH_PRIVATE_KEY"
|
||||
exit 1
|
||||
echo "::warning::Missing AUR_SSH_PRIVATE_KEY; skipping automated AUR publish."
|
||||
echo "skip=true" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
echo "skip=false" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Configure SSH for AUR
|
||||
id: aur_ssh
|
||||
if: steps.aur_prereqs.outputs.skip != 'true'
|
||||
env:
|
||||
AUR_SSH_PRIVATE_KEY: ${{ secrets.AUR_SSH_PRIVATE_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
install -dm700 ~/.ssh
|
||||
printf '%s\n' "${AUR_SSH_PRIVATE_KEY}" > ~/.ssh/aur
|
||||
chmod 600 ~/.ssh/aur
|
||||
ssh-keyscan aur.archlinux.org >> ~/.ssh/known_hosts
|
||||
chmod 644 ~/.ssh/known_hosts
|
||||
if install -dm700 ~/.ssh \
|
||||
&& printf '%s\n' "${AUR_SSH_PRIVATE_KEY}" > ~/.ssh/aur \
|
||||
&& chmod 600 ~/.ssh/aur \
|
||||
&& ssh-keyscan aur.archlinux.org >> ~/.ssh/known_hosts \
|
||||
&& chmod 644 ~/.ssh/known_hosts; then
|
||||
echo "skip=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "::warning::Unable to configure SSH for AUR; skipping automated AUR publish."
|
||||
echo "skip=true" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Clone AUR repo
|
||||
id: aur_clone
|
||||
if: steps.aur_prereqs.outputs.skip != 'true' && steps.aur_ssh.outputs.skip != 'true'
|
||||
env:
|
||||
GIT_SSH_COMMAND: ssh -i ~/.ssh/aur -o IdentitiesOnly=yes
|
||||
run: git clone ssh://aur@aur.archlinux.org/subminer-bin.git aur-subminer-bin
|
||||
run: |
|
||||
set -euo pipefail
|
||||
attempts=3
|
||||
for attempt in $(seq 1 "$attempts"); do
|
||||
if git clone ssh://aur@aur.archlinux.org/subminer-bin.git aur-subminer-bin; then
|
||||
echo "skip=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
rm -rf aur-subminer-bin
|
||||
|
||||
if [ "$attempt" -lt "$attempts" ]; then
|
||||
sleep $((attempt * 15))
|
||||
fi
|
||||
done
|
||||
|
||||
echo "::warning::Unable to clone subminer-bin from AUR after ${attempts} attempts; skipping automated AUR publish."
|
||||
echo "skip=true" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Download release assets for AUR
|
||||
if: steps.aur_prereqs.outputs.skip != 'true' && steps.aur_ssh.outputs.skip != 'true' && steps.aur_clone.outputs.skip != 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
@@ -439,6 +480,7 @@ jobs:
|
||||
--pattern "subminer-assets.tar.gz"
|
||||
|
||||
- name: Update AUR packaging metadata
|
||||
if: steps.aur_prereqs.outputs.skip != 'true' && steps.aur_ssh.outputs.skip != 'true' && steps.aur_clone.outputs.skip != 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
version_no_v="${{ steps.version.outputs.VERSION }}"
|
||||
@@ -453,6 +495,7 @@ jobs:
|
||||
--assets ".tmp/aur-release-assets/subminer-assets.tar.gz"
|
||||
|
||||
- name: Commit and push AUR update
|
||||
if: steps.aur_prereqs.outputs.skip != 'true' && steps.aur_ssh.outputs.skip != 'true' && steps.aur_clone.outputs.skip != 'true'
|
||||
working-directory: aur-subminer-bin
|
||||
env:
|
||||
GIT_SSH_COMMAND: ssh -i ~/.ssh/aur -o IdentitiesOnly=yes
|
||||
@@ -466,4 +509,16 @@ jobs:
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git add PKGBUILD .SRCINFO
|
||||
git commit -m "Update to ${{ steps.version.outputs.VERSION }}"
|
||||
git push origin HEAD:master
|
||||
|
||||
attempts=3
|
||||
for attempt in $(seq 1 "$attempts"); do
|
||||
if git push origin HEAD:master; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$attempt" -lt "$attempts" ]; then
|
||||
sleep $((attempt * 15))
|
||||
fi
|
||||
done
|
||||
|
||||
echo "::warning::Unable to push the AUR update after ${attempts} attempts; GitHub release is published, but subminer-bin needs manual follow-up."
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -9,6 +9,7 @@ out/
|
||||
dist/
|
||||
release/
|
||||
build/yomitan/
|
||||
coverage/
|
||||
|
||||
# Launcher build artifact (produced by make build-launcher)
|
||||
/subminer
|
||||
|
||||
@@ -83,7 +83,6 @@ This project uses Backlog.md MCP for all task and project management activities.
|
||||
- **When to read it**: BEFORE creating tasks, or when you're unsure whether to track work
|
||||
|
||||
These guides cover:
|
||||
|
||||
- Decision framework for when to create tasks
|
||||
- Search-first workflow to avoid duplicates
|
||||
- Links to detailed guides for task creation, execution, and finalization
|
||||
|
||||
266
Backlog.md
Normal file
266
Backlog.md
Normal file
@@ -0,0 +1,266 @@
|
||||
# Backlog
|
||||
|
||||
Purpose: lightweight repo-local task board. Seeded with current testing / coverage work.
|
||||
|
||||
Status keys:
|
||||
|
||||
- `todo`: not started
|
||||
- `doing`: in progress
|
||||
- `blocked`: waiting
|
||||
- `done`: shipped
|
||||
|
||||
Priority keys:
|
||||
|
||||
- `P0`: urgent / release-risk
|
||||
- `P1`: high value
|
||||
- `P2`: useful cleanup
|
||||
- `P3`: nice-to-have
|
||||
|
||||
## Active
|
||||
|
||||
| ID | Pri | Status | Area | Title |
|
||||
| ------ | --- | ------ | -------------- | --------------------------------------------------- |
|
||||
| SM-013 | P1 | done | review-followup | Address PR #36 CodeRabbit action items |
|
||||
|
||||
## Ready
|
||||
|
||||
| ID | Pri | Status | Area | Title |
|
||||
| ------ | --- | ------ | ----------------- | ---------------------------------------------------------------- |
|
||||
| SM-001 | P1 | todo | launcher | Add tests for CLI parser and args normalizer |
|
||||
| SM-002 | P1 | todo | immersion-tracker | Backfill tests for uncovered query exports |
|
||||
| SM-003 | P1 | todo | anki | Add focused field-grouping service + merge edge-case tests |
|
||||
| SM-004 | P2 | todo | tests | Extract shared test utils for deps factories and polling helpers |
|
||||
| SM-005 | P2 | todo | tests | Strengthen weak assertions in app-ready and IPC tests |
|
||||
| SM-006 | P2 | todo | tests | Break up monolithic youtube-flow and subtitle-sidebar tests |
|
||||
| SM-007 | P2 | todo | anilist | Add tests for AniList rate limiter |
|
||||
| SM-008 | P3 | todo | subtitles | Add core subtitle-position persistence/path tests |
|
||||
| SM-009 | P3 | todo | tokenizer | Add tests for JLPT token filter |
|
||||
| SM-010 | P1 | todo | immersion-tracker | Refactor storage + immersion-tracker service into focused modules |
|
||||
| SM-011 | P1 | done | tests | Add coverage reporting for maintained test lanes |
|
||||
| SM-012 | P2 | done | config/runtime | Replace JSON serialize-clone helpers with structured cloning |
|
||||
|
||||
## Icebox
|
||||
|
||||
None.
|
||||
|
||||
## Ticket Details
|
||||
|
||||
### SM-001
|
||||
|
||||
Title: Add tests for CLI parser and args normalizer
|
||||
Priority: P1
|
||||
Status: done
|
||||
Scope:
|
||||
|
||||
- `launcher/config/cli-parser-builder.ts`
|
||||
- `launcher/config/args-normalizer.ts`
|
||||
Acceptance:
|
||||
- root options parsing covered
|
||||
- subcommand routing covered
|
||||
- invalid action / invalid log level / invalid backend cases covered
|
||||
- target classification covered: file, directory, URL, invalid
|
||||
|
||||
### SM-002
|
||||
|
||||
Title: Backfill tests for uncovered query exports
|
||||
Priority: P1
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/core/services/immersion-tracker/query-*.ts`
|
||||
Targets:
|
||||
- headword helpers
|
||||
- anime/media detail helpers not covered by existing wrapper tests
|
||||
- lexical detail / appearance helpers
|
||||
- maintenance helpers beyond `deleteSession` and `upsertCoverArt`
|
||||
Acceptance:
|
||||
- every exported query helper either directly tested or explicitly justified as covered elsewhere
|
||||
- at least one focused regression per complex SQL branch / aggregation branch
|
||||
|
||||
### SM-003
|
||||
|
||||
Title: Add focused field-grouping service + merge edge-case tests
|
||||
Priority: P1
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/anki-integration/field-grouping.ts`
|
||||
- `src/anki-integration/field-grouping-merge.ts`
|
||||
Acceptance:
|
||||
- auto/manual/disabled flow branches covered
|
||||
- duplicate-card preview failure path covered
|
||||
- merge edge cases covered: empty fields, generated media fallback, strict grouped spans, audio synchronization
|
||||
|
||||
### SM-004
|
||||
|
||||
Title: Extract shared test utils for deps factories and polling helpers
|
||||
Priority: P2
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- common `makeDeps` / `createDeps` helpers
|
||||
- common `waitForCondition`
|
||||
Acceptance:
|
||||
- shared helper module added
|
||||
- at least 3 duplicated polling helpers removed
|
||||
- at least 5 duplicated deps factories consolidated or clearly prepared for follow-up migration
|
||||
|
||||
### SM-005
|
||||
|
||||
Title: Strengthen weak assertions in app-ready and IPC tests
|
||||
Priority: P2
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/core/services/app-ready.test.ts`
|
||||
- `src/core/services/ipc.test.ts`
|
||||
Acceptance:
|
||||
- replace broad `assert.ok(...)` presence checks with exact value / order assertions where expected value known
|
||||
- handler registration tests assert channel-specific behavior, not only existence
|
||||
|
||||
### SM-006
|
||||
|
||||
Title: Break up monolithic youtube-flow and subtitle-sidebar tests
|
||||
Priority: P2
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/main/runtime/youtube-flow.test.ts`
|
||||
- `src/renderer/modals/subtitle-sidebar.test.ts`
|
||||
Acceptance:
|
||||
- reduce single-test breadth
|
||||
- split largest tests into focused cases by behavior
|
||||
- keep semantics unchanged
|
||||
|
||||
### SM-007
|
||||
|
||||
Title: Add tests for AniList rate limiter
|
||||
Priority: P2
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/core/services/anilist/rate-limiter.ts`
|
||||
Acceptance:
|
||||
- capacity-window wait behavior covered
|
||||
- `x-ratelimit-remaining` + reset handling covered
|
||||
- `retry-after` handling covered
|
||||
|
||||
### SM-008
|
||||
|
||||
Title: Add core subtitle-position persistence/path tests
|
||||
Priority: P3
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/core/services/subtitle-position.ts`
|
||||
Acceptance:
|
||||
- save/load persistence covered
|
||||
- fallback behavior covered
|
||||
- path normalization behavior covered for URL vs local target
|
||||
|
||||
### SM-009
|
||||
|
||||
Title: Add tests for JLPT token filter
|
||||
Priority: P3
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/core/services/jlpt-token-filter.ts`
|
||||
Acceptance:
|
||||
- excluded term membership covered
|
||||
- ignored POS1 membership covered
|
||||
- exported list / entry consistency covered
|
||||
|
||||
### SM-010
|
||||
|
||||
Title: Refactor storage + immersion-tracker service into focused layers without API changes
|
||||
Priority: P1
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/core/database/storage/storage.ts`
|
||||
- `src/core/database/storage/schema.ts`
|
||||
- `src/core/database/storage/cover-blob.ts`
|
||||
- `src/core/database/storage/records.ts`
|
||||
- `src/core/database/storage/write-path.ts`
|
||||
- `src/core/services/immersion-tracker/youtube.ts`
|
||||
- `src/core/services/immersion-tracker/youtube-manager.ts`
|
||||
- `src/core/services/immersion-tracker/write-queue.ts`
|
||||
- `src/core/services/immersion-tracker/immersion-tracker-service.ts`
|
||||
|
||||
Acceptance:
|
||||
|
||||
- behavior and public API remain unchanged for all callers
|
||||
- `storage.ts` responsibilities split into DDL/migrations, cover blob helpers, record CRUD, and write-path execution
|
||||
- `immersion-tracker-service.ts` reduces to session state, media change orchestration, query proxies, and lifecycle
|
||||
- YouTube code split into pure utilities, a stateful manager (`YouTubeManager`), and a dedicated write queue (`WriteQueue`)
|
||||
- removed `storage.ts` is replaced with focused modules and updated imports
|
||||
- no API or migration regressions; existing tests for trackers/storage coverage remain green or receive focused updates
|
||||
|
||||
### SM-011
|
||||
|
||||
Title: Add coverage reporting for maintained test lanes
|
||||
Priority: P1
|
||||
Status: done
|
||||
Scope:
|
||||
|
||||
- `package.json`
|
||||
- CI workflow files under `.github/`
|
||||
- `docs/workflow/verification.md`
|
||||
Acceptance:
|
||||
- at least one maintained test lane emits machine-readable coverage output
|
||||
- CI surfaces coverage as an artifact, summary, or check output
|
||||
- local contributor path for coverage is documented
|
||||
- chosen coverage path works with Bun/TypeScript lanes already maintained by the repo
|
||||
Implementation note:
|
||||
- Added `bun run test:coverage:src` for the maintained source lane via a sharded coverage runner, with merged LCOV output at `coverage/test-src/lcov.info` and CI/release artifact upload as `coverage-test-src`.
|
||||
|
||||
### SM-012
|
||||
|
||||
Title: Replace JSON serialize-clone helpers with structured cloning
|
||||
Priority: P2
|
||||
Status: todo
|
||||
Scope:
|
||||
|
||||
- `src/runtime-options.ts`
|
||||
- `src/config/definitions.ts`
|
||||
- `src/config/service.ts`
|
||||
- `src/main/controller-config-update.ts`
|
||||
Acceptance:
|
||||
- runtime/config clone helpers stop using `JSON.parse(JSON.stringify(...))`
|
||||
- replacement preserves current behavior for plain config/runtime objects
|
||||
- focused tests cover clone/merge behavior that could regress during the swap
|
||||
- no new clone helper is introduced in these paths without a documented reason
|
||||
|
||||
Done:
|
||||
|
||||
- replaced JSON serialize-clone call sites in runtime/config/controller update paths with `structuredClone`
|
||||
- updated focused tests and fixtures to cover detached clone behavior and guard against regressions
|
||||
|
||||
### SM-013
|
||||
|
||||
Title: Address PR #36 CodeRabbit action items
|
||||
Priority: P1
|
||||
Status: done
|
||||
Scope:
|
||||
|
||||
- `plugins/subminer-workflow/skills/subminer-change-verification/scripts/verify_subminer_change.sh`
|
||||
- `scripts/subminer-change-verification.test.ts`
|
||||
- `src/core/services/immersion-tracker/query-sessions.ts`
|
||||
- `src/core/services/immersion-tracker/query-trends.ts`
|
||||
- `src/core/services/immersion-tracker/maintenance.ts`
|
||||
- `src/main/boot/services.ts`
|
||||
- `src/main/character-dictionary-runtime/zip.test.ts`
|
||||
Acceptance:
|
||||
- fix valid open CodeRabbit findings on PR #36
|
||||
- add focused regression coverage for behavior changes where practical
|
||||
- verify touched tests plus typecheck stay green
|
||||
|
||||
Done:
|
||||
|
||||
- hardened `--artifact-dir` validation in the verification script
|
||||
- fixed trend aggregation rounding and monthly ratio bucketing
|
||||
- preserved unwatched anime episodes in episode queries
|
||||
- restored seconds-based aggregate timestamps in shared maintenance
|
||||
- fixed the startup refactor compile break by making the predicates local at the call site
|
||||
- verified with `bun test src/core/services/immersion-tracker/__tests__/query.test.ts src/core/services/immersion-tracker/__tests__/query-split-modules.test.ts` and `bun run typecheck`
|
||||
21
CHANGELOG.md
21
CHANGELOG.md
@@ -1,5 +1,26 @@
|
||||
# Changelog
|
||||
|
||||
## v0.10.0 (2026-03-29)
|
||||
|
||||
### Changed
|
||||
- Integrations: Replaced the deprecated Discord Rich Presence wrapper with the maintained `@xhayper/discord-rpc` package.
|
||||
|
||||
### Fixed
|
||||
- Stats: Fixed stats startup so the immersion tracker can run when `Bun.serve` is unavailable.
|
||||
- Stats: Stats server now falls back to a Node `http` listener in Electron/runtime paths that do not expose Bun.
|
||||
- Overlay: Fixed the macOS visible-overlay toggle path so manual hides stay hidden and the plugin uses the explicit visible-overlay toggle command.
|
||||
- Subtitle Sidebar: Restored macOS mpv passthrough while the overlay subtitle sidebar is open so clicks outside the sidebar can refocus mpv and keep native keybindings working.
|
||||
|
||||
### Internal
|
||||
- Release: Added a maintained source coverage lane that shards Bun coverage one test file at a time and merges LCOV output into `coverage/test-src/lcov.info`.
|
||||
- Release: CI and release quality-gate now upload the merged source-lane LCOV artifact for inspection.
|
||||
- Runtime: Extracted remaining inline runtime logic from `src/main.ts` into dedicated runtime modules and composer helpers.
|
||||
- Runtime: Added focused regression tests for the extracted runtime/composer boundaries.
|
||||
- Runtime: Updated task tracking notes to mark TASK-238.6 complete and confirm follow-on boot-phase split can be deferred.
|
||||
- Runtime: Split `src/main.ts` boot wiring into dedicated `src/main/boot/services.ts`, `src/main/boot/runtimes.ts`, and `src/main/boot/handlers.ts` modules.
|
||||
- Runtime: Added focused tests for the new boot-phase seams and kept the startup/typecheck/build verification lanes green.
|
||||
- Runtime: Updated internal architecture/task docs to record the boot-phase split and new ownership boundary.
|
||||
|
||||
## v0.9.3 (2026-03-25)
|
||||
|
||||
### Changed
|
||||
|
||||
@@ -63,6 +63,12 @@ Local stats dashboard — watch time, anime library, vocabulary growth, mining t
|
||||
|
||||
<br>
|
||||
|
||||
### Playlist Browser
|
||||
|
||||
Browse sibling episode files and the active mpv queue in one overlay modal. Open it with `Ctrl+Alt+P` to append episodes from the current directory, jump to queued items, remove entries, or reorder the playlist without leaving playback.
|
||||
|
||||
<br>
|
||||
|
||||
### Integrations
|
||||
|
||||
<table>
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
---
|
||||
id: TASK-243
|
||||
title: 'Assess and address PR #36 latest CodeRabbit review round'
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 07:39'
|
||||
updated_date: '2026-03-29 07:41'
|
||||
labels:
|
||||
- code-review
|
||||
- pr-36
|
||||
dependencies: []
|
||||
references:
|
||||
- 'https://github.com/ksyasuda/SubMiner/pull/36'
|
||||
priority: high
|
||||
ordinal: 3600
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Inspect the latest CodeRabbit review round on PR #36, verify each actionable comment against the current branch, implement the confirmed fixes, and verify the touched paths.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Confirmed review comments are implemented or explicitly deferred with rationale.
|
||||
- [ ] #2 Touched paths are verified with the smallest sufficient test/build lane.
|
||||
- [ ] #3 Current PR feedback is reduced to resolved or intentionally deferred suggestions.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Addressed the confirmed latest CodeRabbit review items on PR #36. `scripts/run-coverage-lane.ts` now uses the Bun-style `import.meta.main` entrypoint check with a local ts-ignore to preserve the repo's CommonJS typecheck settings. `src/core/services/immersion-tracker/maintenance.ts` no longer shadows the imported `nowMs` helper in retention functions. `src/main.ts` now centralizes the startup-mode predicates behind a shared helper and releases `resolvedSource.cleanup` on the cached-subtitle fast path so materialized sources do not leak.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,35 @@
|
||||
---
|
||||
id: TASK-244
|
||||
title: 'Assess and address PR #36 latest CodeRabbit review round 2'
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 08:09'
|
||||
updated_date: '2026-03-29 08:10'
|
||||
labels:
|
||||
- code-review
|
||||
- pr-36
|
||||
dependencies: []
|
||||
references:
|
||||
- 'https://github.com/ksyasuda/SubMiner/pull/36'
|
||||
priority: high
|
||||
ordinal: 3610
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Inspect the newest CodeRabbit review round on PR #36, verify the actionable comment against the current branch, implement the confirmed fix, and verify the touched path.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 The actionable review comment is implemented or explicitly deferred with rationale.
|
||||
- [ ] #2 Touched path is verified with the smallest sufficient test lane.
|
||||
- [ ] #3 Current PR feedback is reduced to resolved or intentionally deferred suggestions.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Addressed the actionable latest CodeRabbit comment on PR #36. `src/core/services/immersion-tracker/maintenance.ts` now skips retention deletions when a window is disabled with `Infinity`, so `toDbMs(...)` is only called for finite retention values. Added a regression test in `maintenance.test.ts` that verifies disabled retention windows preserve session events, telemetry, and sessions while returning zero deletions.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -1,11 +1,11 @@
|
||||
project_name: 'SubMiner'
|
||||
default_status: 'To Do'
|
||||
statuses: ['To Do', 'In Progress', 'Done']
|
||||
project_name: "SubMiner"
|
||||
default_status: "To Do"
|
||||
statuses: ["To Do", "In Progress", "Done"]
|
||||
labels: []
|
||||
definition_of_done: []
|
||||
date_format: yyyy-mm-dd
|
||||
max_column_width: 20
|
||||
default_editor: 'nvim'
|
||||
default_editor: "nvim"
|
||||
auto_open_browser: false
|
||||
default_port: 6420
|
||||
remote_operations: true
|
||||
@@ -13,4 +13,4 @@ auto_commit: false
|
||||
bypass_git_hooks: false
|
||||
check_active_branches: true
|
||||
active_branch_days: 30
|
||||
task_prefix: 'task'
|
||||
task_prefix: "task"
|
||||
|
||||
8
backlog/milestones/m-2 - mining-workflow-upgrades.md
Normal file
8
backlog/milestones/m-2 - mining-workflow-upgrades.md
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
id: m-2
|
||||
title: 'Mining Workflow Upgrades'
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
Future user-facing workflow improvements that directly improve discoverability, previewability, and mining control without depending on speculative platform integrations like OCR, marketplace infrastructure, or cloud sync.
|
||||
@@ -0,0 +1,59 @@
|
||||
---
|
||||
id: TASK-238
|
||||
title: Codebase health follow-up: decompose remaining oversized runtime surfaces
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- tech-debt
|
||||
- maintainability
|
||||
- runtime
|
||||
milestone: m-0
|
||||
dependencies: []
|
||||
references:
|
||||
- src/main.ts
|
||||
- src/types.ts
|
||||
- src/main/character-dictionary-runtime.ts
|
||||
- src/core/services/immersion-tracker/query.ts
|
||||
- backlog/tasks/task-87 - Codebase-health-harden-verification-and-retire-dead-architecture-identified-in-the-March-2026-review.md
|
||||
- backlog/completed/task-87.4 - Runtime-composition-root-remove-dead-symbols-and-tighten-module-boundaries-in-src-main.ts.md
|
||||
- backlog/completed/task-87.6 - Anki-integration-maintainability-continue-decomposing-the-oversized-orchestration-layer.md
|
||||
- backlog/tasks/task-238.6 - Extract-remaining-inline-runtime-logic-and-composer-gaps-from-src-main.ts.md
|
||||
- backlog/tasks/task-238.7 - Split-src-main.ts-into-boot-phase-services-runtimes-and-handlers.md
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Follow up the March 2026 codebase-health work with a narrower pass over the biggest remaining production hotspots. The latest review correctly flags `src/main.ts` and `src/types.ts` as maintainability pressure, but it also misses the next real large surfaces that will keep slowing future work: `src/main/character-dictionary-runtime.ts` and `src/core/services/immersion-tracker/query.ts`. This parent task should track focused decomposition work that preserves behavior, avoids redoing already-completed dead-architecture cleanup, and keeps each slice small enough for isolated implementation.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Child tasks exist for each focused cleanup slice instead of one broad “split the monoliths” effort.
|
||||
- [ ] #2 The parent task records sequencing so agents do not overlap on `src/main.ts` and other shared surfaces.
|
||||
- [ ] #3 The selected follow-up tasks target still-live pressure points, not already-completed work like TASK-87.4, TASK-87.5, or TASK-87.6.
|
||||
- [ ] #4 Completion of the child tasks leaves runtime wiring, shared types, character-dictionary orchestration, and immersion-tracker queries materially easier to review and extend.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
Recommended sequencing:
|
||||
|
||||
1. Start TASK-238.3 first. A compatibility-first type split reduces churn risk for the later runtime/query refactors.
|
||||
2. Run TASK-238.4 and TASK-238.5 in parallel after TASK-238.3 if desired; they touch different domains.
|
||||
3. Run TASK-238.1 after or alongside the domain refactors, but keep it focused on window/bootstrap composition only.
|
||||
4. Run TASK-238.2 after TASK-238.1 because both touch `src/main.ts` and the CLI/headless flow should build on the cleaner composition root.
|
||||
5. Run TASK-238.6 after the current composer/setup-window-factory work lands, so the remaining inline runtime logic and composer gaps are extracted from the already-cleaned composition root.
|
||||
6. Run TASK-238.7 only after TASK-238.6 confirms the remaining entrypoint surface still justifies a boot-phase split; then move the boot wiring into dedicated service/runtime/handler modules.
|
||||
|
||||
Shared guardrails:
|
||||
|
||||
- Do not reopen already-completed dead-module cleanup from TASK-87.5 unless new evidence appears.
|
||||
- Keep `src/types.ts` migration compatibility-first; avoid a repo-wide import churn bomb.
|
||||
- Prefer extracting named runtime/domain modules over moving code into new giant helper files.
|
||||
- Verify each slice with the cheapest sufficient lane, then escalate when a task crosses runtime/build boundaries.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,45 @@
|
||||
---
|
||||
id: TASK-238.1
|
||||
title: Extract main-window and overlay-window composition from src/main.ts
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- tech-debt
|
||||
- runtime
|
||||
- windows
|
||||
- maintainability
|
||||
milestone: m-0
|
||||
dependencies: []
|
||||
references:
|
||||
- src/main.ts
|
||||
- src/main/runtime/composers
|
||||
- src/main/runtime/overlay-runtime-bootstrap.ts
|
||||
- docs/architecture/README.md
|
||||
parent_task_id: TASK-238
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
`src/main.ts` still directly owns several `BrowserWindow` construction and window-lifecycle paths, including overlay-adjacent windows and setup flows. That keeps the composition root far larger than intended and makes window behavior hard to test in isolation. Extract the remaining window/bootstrap composition into named runtime modules so `src/main.ts` mostly wires dependencies and app lifecycle events together.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 At least the main overlay window path plus two other window/setup flows are extracted from direct `BrowserWindow` construction inside `src/main.ts`.
|
||||
- [ ] #2 The extracted modules expose narrow factory/handler APIs that can be tested without booting the whole app.
|
||||
- [ ] #3 `src/main.ts` becomes materially smaller and easier to scan, with window creation concentrated behind well-named runtime surfaces.
|
||||
- [ ] #4 Relevant runtime/window tests pass, and new tests are added for any newly isolated window composition helpers.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Map the remaining direct `BrowserWindow` creation sites in `src/main.ts` and group them by shared lifecycle concerns.
|
||||
2. Extract coherent modules for construction, preload/path resolution, and open/focus/reuse behavior rather than moving raw option objects wholesale.
|
||||
3. Update the composition root to consume the new modules and keep side effects/app state ownership explicit.
|
||||
4. Verify with focused runtime/window tests plus `bun run typecheck`.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,46 @@
|
||||
---
|
||||
id: TASK-238.2
|
||||
title: Extract CLI and headless command wiring from src/main.ts
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- tech-debt
|
||||
- cli
|
||||
- runtime
|
||||
- maintainability
|
||||
milestone: m-0
|
||||
dependencies:
|
||||
- TASK-238.1
|
||||
references:
|
||||
- src/main.ts
|
||||
- src/main/cli-runtime.ts
|
||||
- src/cli/args.ts
|
||||
- launcher
|
||||
parent_task_id: TASK-238
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
`src/main.ts` still owns the headless-initial-command flow, argument handling, and a large amount of CLI/runtime bridging. That makes non-window startup paths difficult to reason about and keeps CLI behavior coupled to unrelated desktop boot logic. Extract the remaining CLI/headless orchestration into dedicated runtime services so the main entrypoint only decides which startup path to invoke.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 CLI parsing, initial-command dispatch, and headless command execution no longer live as large inline flows in `src/main.ts`.
|
||||
- [ ] #2 The new modules make the desktop startup path and headless startup path visibly separate and easier to test.
|
||||
- [ ] #3 Existing CLI behaviors remain unchanged, including help output and startup gating behavior.
|
||||
- [ ] #4 Targeted CLI/runtime tests cover the extracted path, and `bun run typecheck` passes.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Map the current `parseArgs` / `handleInitialArgs` / `runHeadlessInitialCommand` / `handleCliCommand` flow in `src/main.ts`.
|
||||
2. Extract a small startup-path selector plus dedicated runtime services for headless execution and interactive startup dispatch.
|
||||
3. Keep Electron app ownership in `src/main.ts`; move only CLI orchestration and context assembly.
|
||||
4. Verify with CLI-focused tests plus `bun run typecheck`.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,59 @@
|
||||
---
|
||||
id: TASK-238.3
|
||||
title: Introduce domain type entrypoints and shrink src/types.ts import surface
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
updated_date: '2026-03-27 00:14'
|
||||
labels:
|
||||
- tech-debt
|
||||
- types
|
||||
- maintainability
|
||||
milestone: m-0
|
||||
dependencies: []
|
||||
references:
|
||||
- src/types.ts
|
||||
- src/shared/ipc/contracts.ts
|
||||
- src/config/service.ts
|
||||
- docs/architecture/README.md
|
||||
parent_task_id: TASK-238
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
`src/types.ts` has become the repo-wide dumping ground for unrelated domains. Splitting it is still worthwhile, but a big-bang move would create noisy churn across a large import graph. Introduce domain entrypoints under `src/types/` and migrate the highest-churn imports first while leaving `src/types.ts` as a compatibility barrel until the new structure is proven.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Domain-focused type modules exist for the main clusters currently mixed together in `src/types.ts` (for example Anki, config/runtime, subtitle/media, and integration/runtime-option types).
|
||||
- [x] #2 `src/types.ts` becomes a thinner compatibility layer or barrel instead of the sole source of truth for every shared type.
|
||||
- [x] #3 A meaningful set of imports is migrated to the new entrypoints without breaking the maintained typecheck/test lanes.
|
||||
- [x] #4 The new structure is documented well enough that contributors can tell where new shared types should live.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Inventory the main type clusters in `src/types.ts` and choose stable domain seams.
|
||||
2. Create `src/types/` modules and re-export through `src/types.ts` so the migration can be incremental.
|
||||
3. Migrate the highest-value import sites first, especially config/runtime and Anki-heavy surfaces.
|
||||
4. Verify with `bun run typecheck` and the cheapest test lane covering touched domains.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Implemented domain entrypoints under `src/types/` and kept `src/types.ts` as a compatibility barrel (`src/types/anki.ts`, `src/types/config.ts`, `src/types/integrations.ts`, `src/types/runtime.ts`, `src/types/runtime-options.ts`, `src/types/subtitle.ts`). Migrated the highest-value import surfaces away from `src/types.ts` in config/runtime/Anki-related modules and shared IPC surfaces. Added type-level regression coverage in `src/types-domain-entrypoints.type-test.ts`.
|
||||
|
||||
Aligned docs in `docs/architecture/README.md`, `docs/architecture/domains.md`, and `docs-site/changelog.md` to support the change and clear docs-site sync mismatch.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Task completed with commit `5dd8bb7f` (`refactor: split shared type entrypoints`). The refactor introduced domain type entrypoints, shrank the `src/types.ts` import surface, updated import consumers, and recorded verification evidence in the local verifier artifacts. Backlog now tracks TASK-238.3 as done.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,58 @@
|
||||
---
|
||||
id: TASK-238.4
|
||||
title: Decompose character dictionary runtime into fetch, build, and cache modules
|
||||
status: Done
|
||||
updated_date: '2026-03-27 00:20'
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- tech-debt
|
||||
- runtime
|
||||
- anilist
|
||||
- maintainability
|
||||
milestone: m-0
|
||||
dependencies:
|
||||
- TASK-238.3
|
||||
references:
|
||||
- src/main/character-dictionary-runtime.ts
|
||||
- src/main/runtime/character-dictionary-auto-sync.ts
|
||||
- docs/architecture/README.md
|
||||
parent_task_id: TASK-238
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
`src/main/character-dictionary-runtime.ts` is now one of the largest live production files in the repo and combines AniList transport, name normalization, snapshot/image shaping, cache management, and zip packaging. That file will keep growing as character-dictionary features evolve. Split it into focused modules so the runtime surface becomes orchestration instead of a catch-all implementation blob.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 AniList fetch/parsing logic, dictionary-entry building, and snapshot/cache/zip persistence no longer live in one giant file.
|
||||
- [x] #2 The public runtime API stays behavior-compatible for current callers.
|
||||
- [x] #3 The top-level runtime/orchestration file becomes materially smaller and easier to review.
|
||||
- [x] #4 Existing character-dictionary tests still pass, and new focused tests cover the extracted modules where needed.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Identify the dominant concern boundaries inside `src/main/character-dictionary-runtime.ts`.
|
||||
2. Extract fetch/transform/persist modules with narrow interfaces, keeping data-shape ownership explicit.
|
||||
3. Leave the exported runtime API stable for current main-process callers.
|
||||
4. Verify with the maintained character-dictionary/runtime test lane plus `bun run typecheck`.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Split `src/main/character-dictionary-runtime.ts` into focused modules under `src/main/character-dictionary-runtime/` (`fetch`, `build`, `cache`, plus helper modules). The orchestrator stayed as a compatibility shim/API surface with delegated module functions. Added focused tests for cache snapshot semantics and term rebuild + collapsible-open-state behavior in the new modules. Updated runtime architecture docs in `docs/architecture/domains.md` and `docs-site/architecture.md`.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Task completed with commit `5b06579e` (`refactor: split character dictionary runtime modules`). Runtime refactor landed with regression coverage and verification including runtime-compat lanes, and all changed behavior was validated as API-compatible for callers.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,61 @@
|
||||
---
|
||||
id: TASK-238.5
|
||||
title: Split immersion tracker query layer into focused read-model modules
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-26 20:49'
|
||||
updated_date: '2026-03-27 00:00'
|
||||
labels:
|
||||
- tech-debt
|
||||
- stats
|
||||
- database
|
||||
- maintainability
|
||||
milestone: m-0
|
||||
dependencies:
|
||||
- TASK-238.3
|
||||
references:
|
||||
- src/core/services/immersion-tracker/query.ts
|
||||
- src/core/services/stats-server.ts
|
||||
- src/core/services/immersion-tracker-service.ts
|
||||
parent_task_id: TASK-238
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
`src/core/services/immersion-tracker/query.ts` has grown into a large mixed read/write/maintenance surface that owns library queries, timeline/detail queries, cleanup helpers, and rollup rebuild hooks. That size makes stats work harder to change safely. Split the query layer into focused read-model and maintenance modules so future stats/dashboard work does not keep landing in one 2500-line file.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Query responsibilities are grouped into focused modules such as library/session detail, vocabulary/kanji detail, and maintenance/cleanup helpers.
|
||||
- [x] #2 The stats server and immersion tracker service depend on stable exported query surfaces instead of one monolithic file.
|
||||
- [x] #3 The refactor preserves current SQL behavior and existing statistics outputs.
|
||||
- [x] #4 Existing stats/immersion tests still pass, with added focused coverage where extraction creates new seams.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Inventory the major query clusters and choose modules that match current caller boundaries.
|
||||
2. Extract without changing schema or response contracts unless a narrow cleanup is required for compile/test health.
|
||||
3. Keep SQL ownership close to the domain module that consumes it; avoid a giant `queries/` dump with no structure.
|
||||
4. Verify with the maintained stats/immersion test lane plus `bun run typecheck`.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Split the monolithic query surface into focused read-model modules for sessions, trends, lexical data, library lookups, and maintenance helpers. Updated the service and test imports to use the new module boundaries.
|
||||
|
||||
Verification: `bun run typecheck` passed. Focused query and stats-server tests passed, including the `stats-server.test.ts` coverage around the new Bun fallback path.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Extracted the immersion-tracker query layer into smaller read-model modules and kept the compatibility barrel in place so existing call sites can transition cleanly. Added focused coverage and verified the refactor with typecheck plus targeted tests.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,84 @@
|
||||
---
|
||||
id: TASK-238.6
|
||||
title: Extract remaining inline runtime logic and composer gaps from src/main.ts
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-27 00:00'
|
||||
updated_date: '2026-03-27 22:13'
|
||||
labels:
|
||||
- tech-debt
|
||||
- runtime
|
||||
- maintainability
|
||||
- composers
|
||||
milestone: m-0
|
||||
dependencies:
|
||||
- TASK-238.1
|
||||
- TASK-238.2
|
||||
references:
|
||||
- src/main.ts
|
||||
- src/main/runtime/youtube-flow.ts
|
||||
- src/main/runtime/autoplay-ready-gate.ts
|
||||
- src/main/runtime/subtitle-prefetch-init.ts
|
||||
- src/main/runtime/discord-presence-runtime.ts
|
||||
- src/main/overlay-modal-state.ts
|
||||
- src/main/runtime/composers
|
||||
parent_task_id: TASK-238
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
`src/main.ts` still mixes two concerns: pure dependency wiring and inline runtime logic. The earlier composer extractions reduce the wiring burden, but the file still owns several substantial behavior blocks and a few large inline dependency groupings. This task tracks the next maintainability pass: move the remaining runtime logic into the appropriate domain modules, add missing composer wrappers for the biggest grouped handler blocks, and reassess whether a boot-phase split is still necessary after the entrypoint becomes mostly wiring.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 `runYoutubePlaybackFlow`, `maybeSignalPluginAutoplayReady`, `refreshSubtitlePrefetchFromActiveTrack`, `publishDiscordPresence`, and `handleModalInputStateChange` no longer live as substantial inline logic in `src/main.ts`.
|
||||
- [x] #2 The large subtitle/prefetch, stats startup, and overlay visibility dependency groupings are wrapped behind named composer helpers instead of remaining inline in `src/main.ts`.
|
||||
- [x] #3 `src/main.ts` reads primarily as a boot and lifecycle coordinator, with domain behavior concentrated in named runtime modules.
|
||||
- [x] #4 Focused tests cover the extracted behavior or the new composer surfaces.
|
||||
- [x] #5 The task records whether the remaining size still justifies a boot-phase split or whether that follow-up can wait.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
Recommended sequence:
|
||||
|
||||
1. Let the current composer and `setup-window-factory` work land first so this slice starts from a stable wiring baseline.
|
||||
2. Extract the five inline runtime functions into their natural domain modules or direct equivalents.
|
||||
3. Add or extend composer helpers for subtitle/prefetch, stats startup, and overlay visibility handler grouping.
|
||||
4. Re-scan `src/main.ts` after the extraction and decide whether a boot-phase split is still the right next task.
|
||||
5. Verify the extracted behavior with focused tests first, then run the relevant broader runtime gate if the slice crosses startup boundaries.
|
||||
|
||||
Guardrails:
|
||||
|
||||
- Keep the work behavior-preserving.
|
||||
- Prefer moving logic to existing runtime surfaces over creating new giant helper files.
|
||||
- Do not expand into unrelated `src/main.ts` cleanup that is already tracked by other TASK-238 slices.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Extracted the remaining inline runtime seams from `src/main.ts` into focused runtime modules:
|
||||
`src/main/runtime/youtube-playback-runtime.ts`,
|
||||
`src/main/runtime/autoplay-ready-gate.ts`,
|
||||
`src/main/runtime/subtitle-prefetch-runtime.ts`,
|
||||
`src/main/runtime/discord-presence-runtime.ts`,
|
||||
and `src/main/runtime/overlay-modal-input-state.ts`.
|
||||
|
||||
Added named composer wrappers for the grouped subtitle/prefetch, stats startup, and overlay visibility wiring in `src/main/runtime/composers/`.
|
||||
|
||||
Re-scan result for the boot-phase split follow-up: the entrypoint is materially closer to a boot/lifecycle coordinator now, so TASK-238.7 remains a valid future cleanup but no longer feels urgent or blocking for maintainability.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
TASK-238.6 is complete. Verification passed with `bun run typecheck`, focused runtime/composer tests, `bun run test:fast`, `bun run test:env`, and `bun run build`. The remaining `src/main.ts` work is now better isolated behind runtime modules and composer helpers, and the boot-phase split can wait for a later cleanup pass instead of being treated as immediate follow-on work.
|
||||
|
||||
Backlog completion now includes changelog artifact `changes/2026-03-27-task-238.6-main-runtime-refactor.md` under runtime internals.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,85 @@
|
||||
---
|
||||
id: TASK-238.7
|
||||
title: Split src/main.ts into boot-phase services, runtimes, and handlers
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-27 00:00'
|
||||
updated_date: '2026-03-27 22:45'
|
||||
labels:
|
||||
- tech-debt
|
||||
- runtime
|
||||
- maintainability
|
||||
- architecture
|
||||
milestone: m-0
|
||||
dependencies:
|
||||
- TASK-238.6
|
||||
references:
|
||||
- src/main.ts
|
||||
- src/main/boot/services.ts
|
||||
- src/main/boot/runtimes.ts
|
||||
- src/main/boot/handlers.ts
|
||||
- src/main/runtime/composers
|
||||
parent_task_id: TASK-238
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
After the remaining inline runtime logic and composer gaps are extracted, `src/main.ts` should be split along boot-phase boundaries so the entrypoint stops mixing service construction, domain runtime composition, and handler wiring in one file. This task tracks that structural split: move service instantiation, runtime composition, and handler orchestration into dedicated boot modules, then leave `src/main.ts` as a thin lifecycle coordinator with clear startup-path selection.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Service instantiation lives in a dedicated boot module instead of a large inline setup block in `src/main.ts`.
|
||||
- [x] #2 Domain runtime composition lives in a dedicated boot module, separate from lifecycle and handler dispatch.
|
||||
- [x] #3 Handler/composer invocation lives in a dedicated boot module, with `src/main.ts` reduced to app lifecycle and startup-path selection.
|
||||
- [x] #4 Existing startup behavior remains unchanged across desktop and headless flows.
|
||||
- [x] #5 Focused tests cover the split surfaces, and the relevant runtime/typecheck gate passes.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
Recommended sequence:
|
||||
|
||||
1. Re-scan `src/main.ts` after TASK-238.6 lands and mark the remaining boot-phase seams by responsibility.
|
||||
2. Extract service instantiation into `src/main/boot/services.ts` or equivalent.
|
||||
3. Extract runtime composition into `src/main/boot/runtimes.ts` or equivalent.
|
||||
4. Extract handler/composer orchestration into `src/main/boot/handlers.ts` or equivalent.
|
||||
5. Shrink `src/main.ts` to startup-path selection, app lifecycle hooks, and minimal boot wiring.
|
||||
6. Verify the split with focused entrypoint/runtime tests first, then run the broader runtime gate if the refactor crosses startup boundaries.
|
||||
|
||||
Guardrails:
|
||||
|
||||
- Keep the split behavior-preserving.
|
||||
- Prefer small boot modules with narrow ownership over a new monolithic bootstrap layer.
|
||||
- Do not reopen the inline logic work already tracked by TASK-238.6 unless a remaining seam truly belongs here.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Added boot-phase modules under `src/main/boot/`:
|
||||
`services.ts` for config/user-data/runtime-registry/overlay bootstrap service construction,
|
||||
`runtimes.ts` for named runtime/composer entrypoints and grouped boot-phase seams,
|
||||
and `handlers.ts` for handler/composer boot entrypoints.
|
||||
|
||||
Rewired `src/main.ts` to source boot-phase service construction from `createMainBootServices(...)` and to route runtime/handler composition through boot-level exports instead of keeping the entrypoint as the direct owner of every composition import.
|
||||
|
||||
Added focused tests for the new boot seams in
|
||||
`src/main/boot/services.test.ts`,
|
||||
`src/main/boot/runtimes.test.ts`,
|
||||
and `src/main/boot/handlers.test.ts`.
|
||||
|
||||
Updated internal architecture docs to note that `src/main/boot/` now owns boot-phase assembly seams so `src/main.ts` can stay centered on lifecycle coordination and startup-path selection.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
TASK-238.7 is complete. Verification passed with focused boot tests, `bun run typecheck`, `bun run test:fast`, and `bun run build`. `src/main.ts` still acts as the composition root, but the boot-phase split now moves service instantiation, runtime composition seams, and handler composition seams into dedicated `src/main/boot/*` modules so the entrypoint reads more like a lifecycle coordinator than a single monolithic bootstrap file.
|
||||
|
||||
Backlog completion now includes changelog artifact `changes/2026-03-27-task-238.7-main-boot-split.md` for the internal runtime architecture pass.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,51 @@
|
||||
---
|
||||
id: TASK-239
|
||||
title: 'Mining workflow upgrades: prioritize high-value user-facing improvements'
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- feature
|
||||
- ux
|
||||
- planning
|
||||
milestone: m-2
|
||||
dependencies: []
|
||||
references:
|
||||
- src/main.ts
|
||||
- src/renderer
|
||||
- src/anki-integration.ts
|
||||
- src/config/service.ts
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Track the next set of high-value workflow improvements surfaced by the March 2026 review. The goal is to capture bounded, implementation-sized feature slices with clear user value and avoid prematurely committing to much larger bets like hard-sub OCR, plugin marketplace infrastructure, or cloud config sync. Focus this parent task on features that improve the core mining workflow directly: profile-aware setup, action discoverability, previewing output before mining, and selecting richer subtitle ranges.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Child tasks exist for the selected near-to-medium-term workflow upgrades with explicit scope and exclusions.
|
||||
- [ ] #2 The parent task records the recommended sequencing so future work starts with the best value/risk ratio.
|
||||
- [ ] #3 The tracked feature set stays grounded in existing product surfaces instead of speculative external-platform integrations.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
Recommended sequencing:
|
||||
|
||||
1. Start TASK-239.3 first. Template preview is the smallest high-signal UX win on a core mining path.
|
||||
2. Start TASK-239.2 next. A command palette improves discoverability across existing actions without large backend upheaval.
|
||||
3. Start TASK-239.4 after the preview/palette work. Sentence clipping is high-value but touches runtime, subtitle selection, and card creation flows together.
|
||||
4. Keep TASK-239.1 as a foundation project and scope it narrowly to local multi-profile support. Do not expand it into cloud sync in the same slice.
|
||||
|
||||
Deliberate exclusions for now:
|
||||
|
||||
- hard-sub OCR
|
||||
- plugin marketplace infrastructure
|
||||
- cloud/device sync
|
||||
- site-specific streaming source auto-detection beyond narrow discovery spikes
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,46 @@
|
||||
---
|
||||
id: TASK-239.1
|
||||
title: Add profile-aware config foundations and profile selection flow
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- feature
|
||||
- config
|
||||
- launcher
|
||||
- ux
|
||||
milestone: m-2
|
||||
dependencies: []
|
||||
references:
|
||||
- src/config/service.ts
|
||||
- src/config/load.ts
|
||||
- launcher/config.ts
|
||||
- src/main.ts
|
||||
parent_task_id: TASK-239
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Introduce the foundation for local multi-profile use so users can keep separate setups for different workflows without hand-editing or swapping config files manually. Keep the first slice intentionally narrow: named local profiles, explicit selection, separate config/data paths, and safe migration from the current single-profile setup. Do not couple this task to cloud sync or remote profile sharing.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Users can create/select a named local profile and launch SubMiner against that profile explicitly.
|
||||
- [ ] #2 Each profile uses separate config and data storage paths for settings and profile-scoped runtime state that should not bleed across workflows.
|
||||
- [ ] #3 Existing single-profile users migrate safely to a default profile without losing settings.
|
||||
- [ ] #4 The active profile is visible in the launcher/app surface where it materially affects user behavior.
|
||||
- [ ] #5 Tests cover profile resolution, migration/defaulting behavior, and at least one end-to-end selection path.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Design a minimal profile storage layout and resolution strategy that works for launcher and desktop runtime entrypoints.
|
||||
2. Add profile selection plumbing before changing feature behavior inside individual services.
|
||||
3. Migrate config/data-path resolution to be profile-aware while preserving a safe default-profile fallback.
|
||||
4. Verify with config/launcher tests plus targeted runtime coverage.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,46 @@
|
||||
---
|
||||
id: TASK-239.2
|
||||
title: Add a searchable command palette for desktop actions
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- feature
|
||||
- ux
|
||||
- desktop
|
||||
- shortcuts
|
||||
milestone: m-2
|
||||
dependencies: []
|
||||
references:
|
||||
- src/renderer
|
||||
- src/shared/ipc/contracts.ts
|
||||
- src/main/runtime/overlay-runtime-options.ts
|
||||
- src/main.ts
|
||||
parent_task_id: TASK-239
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
SubMiner already exposes many actions through scattered shortcuts, menus, and modal flows. Add a searchable command palette so users can discover and execute high-value desktop actions from one keyboard-first surface. Build on the existing runtime-options/modal infrastructure where practical instead of creating a completely separate interaction model.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 A keyboard-accessible command palette opens from the desktop app and lists supported actions with searchable labels.
|
||||
- [ ] #2 Commands are backed by an explicit registry so action availability and labels are not hard-coded in one renderer component.
|
||||
- [ ] #3 Users can navigate and execute commands entirely from the keyboard.
|
||||
- [ ] #4 The first slice includes the highest-value existing actions rather than trying to cover every possible command on day one.
|
||||
- [ ] #5 Tests cover command filtering, execution dispatch, and at least one disabled/unavailable command state.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Define a small command-registry contract shared across renderer and main-process dispatch.
|
||||
2. Reuse existing modal/runtime plumbing where it fits so the palette is a thin discoverability layer over current actions.
|
||||
3. Ship a narrow but useful initial command set, then expand later based on usage.
|
||||
4. Verify with renderer tests plus targeted IPC/runtime tests.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,45 @@
|
||||
---
|
||||
id: TASK-239.3
|
||||
title: Add live Anki template preview for card output
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- feature
|
||||
- anki
|
||||
- ux
|
||||
milestone: m-2
|
||||
dependencies: []
|
||||
references:
|
||||
- src/anki-integration.ts
|
||||
- src/anki-integration/card-creation.ts
|
||||
- src/config/resolve/anki-connect.ts
|
||||
- src/renderer
|
||||
parent_task_id: TASK-239
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Users currently have to infer what card output will look like from config fields and post-mine results. Add a live preview surface that shows the resolved card template output before mining so users can catch broken field mappings, missing media, or undesirable formatting earlier.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Users can open a preview that renders the resolved front/back field output for the current note/card template configuration.
|
||||
- [ ] #2 The preview clearly surfaces missing or unmapped fields instead of silently showing blank content.
|
||||
- [ ] #3 Preview generation uses the same transformation logic as the live card-creation path so it stays trustworthy.
|
||||
- [ ] #4 The first slice works with representative sample mining payloads and handles missing optional media gracefully.
|
||||
- [ ] #5 Tests cover preview rendering for at least one valid and one invalid/missing-field configuration.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Identify the current card-creation data path and extract any logic needed to render a preview without duplicating transformation rules.
|
||||
2. Add a focused preview UI in the most relevant existing configuration/setup surface.
|
||||
3. Surface validation/warning states for empty mappings, missing fields, and media-dependent outputs.
|
||||
4. Verify with Anki integration tests plus renderer coverage for preview states.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,46 @@
|
||||
---
|
||||
id: TASK-239.4
|
||||
title: Add sentence clipping from arbitrary subtitle ranges
|
||||
status: To Do
|
||||
assignee: []
|
||||
created_date: '2026-03-26 20:49'
|
||||
labels:
|
||||
- feature
|
||||
- subtitle
|
||||
- anki
|
||||
- ux
|
||||
milestone: m-2
|
||||
dependencies: []
|
||||
references:
|
||||
- src/renderer/modals/subtitle-sidebar.ts
|
||||
- src/main/runtime/subtitle-position.ts
|
||||
- src/anki-integration/card-creation.ts
|
||||
- src/main/runtime/mpv-main-event-actions.ts
|
||||
parent_task_id: TASK-239
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Current mining flows are optimized around the active subtitle line. Add a sentence-clipping workflow that lets users select an arbitrary contiguous subtitle range, preview the combined text/timing, and mine from that selection. This should improve multi-line dialogue capture without forcing manual copy/paste or separate post-processing.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Users can select a contiguous subtitle range from the existing subtitle UI instead of being limited to the active cue.
|
||||
- [ ] #2 The workflow previews the combined text and resulting timing range before mining.
|
||||
- [ ] #3 Mining from a clipped range uses the combined subtitle payload in card generation while preserving existing single-line behavior.
|
||||
- [ ] #4 The feature handles overlapping/edge timing cases predictably and does not corrupt the normal active-cue flow.
|
||||
- [ ] #5 Tests cover range selection, combined payload generation, and at least one card-creation path using a clipped selection.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Define a selection model that fits the existing subtitle sidebar/runtime data flow.
|
||||
2. Add preview + confirmation UI before routing the clipped payload into mining.
|
||||
3. Keep the existing single-line path intact and treat clipping as an additive workflow.
|
||||
4. Verify with subtitle-sidebar, runtime, and Anki/card-creation tests.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
@@ -0,0 +1,81 @@
|
||||
---
|
||||
id: TASK-240
|
||||
title: Migrate SubMiner agent skills into a repo-local plugin workflow
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-26 00:00'
|
||||
updated_date: '2026-03-26 23:23'
|
||||
labels:
|
||||
- skills
|
||||
- plugin
|
||||
- workflow
|
||||
- backlog
|
||||
- tooling
|
||||
dependencies:
|
||||
- TASK-159
|
||||
- TASK-160
|
||||
priority: high
|
||||
ordinal: 24000
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
|
||||
Turn the current SubMiner-specific repo skills into a reproducible repo-local plugin workflow. The plugin should become the canonical source of truth for the SubMiner scrum-master and change-verification skills, bundle the scripts and metadata needed to test and validate changes, and preserve compatibility for existing repo references through thin `.agents/skills/` shims while the migration settles.
|
||||
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
|
||||
- [x] #1 A repo-local plugin scaffold exists for the SubMiner workflow, with manifest and marketplace metadata wired according to the repo-local plugin layout.
|
||||
- [x] #2 `subminer-scrum-master` and `subminer-change-verification` live under the plugin as the canonical skill sources, along with any helper scripts or supporting files needed for reproducible use.
|
||||
- [x] #3 Existing repo-level `.agents/skills/` entrypoints are reduced to compatibility shims or redirects instead of remaining as duplicate sources of truth.
|
||||
- [x] #4 The plugin-owned workflow explicitly documents backlog-first orchestration and change verification expectations, including how the skills work together.
|
||||
- [x] #5 The migration is validated with the cheapest sufficient repo-native verification lane and the task records the exact commands and any skips/blockers.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
|
||||
1. Inspect the plugin-creator contract and current repo skill/script layout, then choose the plugin name, directory structure, and migration boundaries.
|
||||
2. Scaffold a repo-local plugin plus marketplace entry, keeping the plugin payload under `plugins/<name>/` and the catalog entry under `.agents/plugins/marketplace.json`.
|
||||
3. Move the two SubMiner-specific skills and their helper scripts into the plugin as the canonical source, adding any plugin docs or supporting metadata needed for reproducible testing/validation.
|
||||
4. Replace the existing `.agents/skills/subminer-*` surfaces with minimal compatibility shims that point agents at the plugin-owned sources without duplicating logic.
|
||||
5. Update internal docs or references that should now describe the plugin-first workflow.
|
||||
6. Run the cheapest sufficient verification lane for plugin/internal-doc changes and record the results in this task.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
|
||||
2026-03-26: User approved the migration shape where the plugin becomes the canonical source of truth and `.agents/skills/` stays only as compatibility shims. Repo-local plugin chosen over home-local plugin.
|
||||
|
||||
2026-03-26: Backlog MCP resources/tools are not available in this Codex session (`MCP startup failed`), so this task is being initialized directly in the repo-local `backlog/` files instead of through the live Backlog MCP interface.
|
||||
|
||||
2026-03-26: Scaffolded `plugins/subminer-workflow/` plus `.agents/plugins/marketplace.json`, moved the scrum-master and change-verification skill definitions into the plugin as the canonical sources, and converted the old `.agents/skills/` surfaces into compatibility shims. Preserved the old verifier script entrypoints as wrappers because backlog/docs history already calls them directly.
|
||||
|
||||
2026-03-26: Verification passed.
|
||||
|
||||
- `bash -n plugins/subminer-workflow/skills/subminer-change-verification/scripts/classify_subminer_diff.sh`
|
||||
- `bash -n plugins/subminer-workflow/skills/subminer-change-verification/scripts/verify_subminer_change.sh`
|
||||
- `bash -n .agents/skills/subminer-change-verification/scripts/classify_subminer_diff.sh`
|
||||
- `bash -n .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh`
|
||||
- `bash .agents/skills/subminer-change-verification/scripts/classify_subminer_diff.sh plugins/subminer-workflow/.codex-plugin/plugin.json docs/workflow/agent-plugins.md .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh`
|
||||
- `bash .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh --lane docs plugins/subminer-workflow .agents/skills/subminer-scrum-master/SKILL.md .agents/skills/subminer-change-verification/SKILL.md .agents/skills/subminer-change-verification/scripts/classify_subminer_diff.sh .agents/skills/subminer-change-verification/scripts/verify_subminer_change.sh .agents/plugins/marketplace.json docs/workflow/README.md docs/workflow/agent-plugins.md 'backlog/tasks/task-240 - Migrate-SubMiner-agent-skills-into-a-repo-local-plugin-workflow.md'`
|
||||
- Verifier artifacts: `.tmp/skill-verification/subminer-verify-20260326-232300-E2NQVX/`
|
||||
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
|
||||
Created a repo-local `subminer-workflow` plugin as the canonical packaging for the SubMiner scrum-master and change-verification workflow. The plugin now owns both skills, the verifier helper scripts, plugin metadata, and workflow docs. The old `.agents/skills/` surfaces remain only as compatibility shims, and the old verifier script paths now forward to the plugin-owned scripts so existing docs and backlog commands continue to work. Targeted plugin/docs verification passed, including wrapper-script syntax checks and a real verifier run through the legacy entrypoint.
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,37 @@
|
||||
---
|
||||
id: TASK-241
|
||||
title: Add optional setup action to seed SubMiner mpv profile
|
||||
type: feature
|
||||
status: Open
|
||||
assignee: []
|
||||
created_date: '2026-03-27 11:22'
|
||||
updated_date: '2026-03-27 11:22'
|
||||
labels:
|
||||
- setup
|
||||
- mpv
|
||||
- docs
|
||||
- ux
|
||||
dependencies: []
|
||||
references: []
|
||||
documentation:
|
||||
- /home/sudacode/projects/japanese/SubMiner/docs-site/usage.md
|
||||
- /home/sudacode/projects/japanese/SubMiner/docs-site/launcher-script.md
|
||||
ordinal: 24100
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Add an optional control in the first-run / setup flow to write or update the user’s mpv configuration with SubMiner-recommended defaults (especially the `subminer` profile), so users can recover from a missing profile without manual config editing.
|
||||
|
||||
The docs for launcher usage must explicitly state that SubMiner’s Windows mpv launcher path runs mpv with `--profile=subminer` by default.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 Add an optional setup UI action/button to generate or overwrite a user-confirmed mpv config that includes a `subminer` profile.
|
||||
- [ ] #2 The action should be non-destructive by default, show diff/contents before write, and support append/update mode when other mpv settings already exist.
|
||||
- [ ] #3 Document how to resolve the missing-profile scenario and clearly state that the SubMiner mpv launcher runs with `--profile=subminer` by default (`--launch-mpv` / Windows mpv shortcut path).
|
||||
- [ ] #4 Add/adjust setup validation messaging so users are not blocked if `subminer` profile is initially missing, but can opt into one-click setup recovery.
|
||||
- [ ] #5 Include a short verification path for both Windows and non-Windows flows (for example dry-run + write path).
|
||||
<!-- AC:END -->
|
||||
@@ -0,0 +1,35 @@
|
||||
---
|
||||
id: TASK-242
|
||||
title: Fix stats server Bun fallback in coverage lane
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 07:31'
|
||||
updated_date: '2026-03-29 07:37'
|
||||
labels:
|
||||
- ci
|
||||
- bug
|
||||
milestone: cleanup
|
||||
dependencies: []
|
||||
references:
|
||||
- 'PR #36'
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Coverage CI fails when `startStatsServer` reaches the Bun server seam under the maintained source lane. Add a runtime fallback that works when `Bun.serve` is unavailable and keep the stats-server startup path testable.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 `bun run test:coverage:src` passes in GitHub CI
|
||||
- [x] #2 `startStatsServer` uses `Bun.serve` when present and a Node server fallback otherwise
|
||||
- [x] #3 Regression coverage exists for the fallback startup path
|
||||
<!-- AC:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Fixed the CI failure in the coverage lane by replacing the Bun-only stats server path with a Bun-or-node/http startup fallback and by normalizing setup window options so undefined BrowserWindow fields are omitted. Verified the exact coverage lane under Bun 1.3.5 and confirmed the GitHub Actions run for PR #36 completed successfully.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,68 @@
|
||||
---
|
||||
id: TASK-245
|
||||
title: Cut minor release v0.10.0 for docs and release prep
|
||||
status: Done
|
||||
assignee:
|
||||
- '@codex'
|
||||
created_date: '2026-03-29 08:10'
|
||||
updated_date: '2026-03-29 08:13'
|
||||
labels:
|
||||
- release
|
||||
- docs
|
||||
- minor
|
||||
dependencies: []
|
||||
references:
|
||||
- /home/sudacode/projects/japanese/SubMiner/package.json
|
||||
- /home/sudacode/projects/japanese/SubMiner/README.md
|
||||
- /home/sudacode/projects/japanese/SubMiner/docs/RELEASING.md
|
||||
- /home/sudacode/projects/japanese/SubMiner/docs/README.md
|
||||
- /home/sudacode/projects/japanese/SubMiner/docs-site/changelog.md
|
||||
- /home/sudacode/projects/japanese/SubMiner/CHANGELOG.md
|
||||
- /home/sudacode/projects/japanese/SubMiner/release/release-notes.md
|
||||
priority: high
|
||||
ordinal: 54850
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Prepare the next 0-ver minor release cut as `v0.10.0`, keeping release-facing docs, backlog, and changelog artifacts aligned, then run the release-prep verification gate.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Repository version metadata is updated to `0.10.0`.
|
||||
- [x] #2 Release-facing docs and public changelog surfaces are aligned for the `v0.10.0` cut.
|
||||
- [x] #3 `CHANGELOG.md` and `release/release-notes.md` contain the committed `v0.10.0` section and any consumed fragments are removed.
|
||||
- [x] #4 Release-prep verification passes for changelog, config example, typecheck, tests, and build.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Completed:
|
||||
- Bumped `package.json` from `0.9.3` to `0.10.0`.
|
||||
- Ran `bun run changelog:build --version 0.10.0 --date 2026-03-29`, which generated `CHANGELOG.md` and `release/release-notes.md` and removed the queued `changes/*.md` fragments.
|
||||
- Updated `docs-site/changelog.md` with the public-facing `v0.10.0` summary.
|
||||
|
||||
Verification:
|
||||
- `bun run changelog:lint`
|
||||
- `bun run changelog:check --version 0.10.0`
|
||||
- `bun run verify:config-example`
|
||||
- `bun run typecheck`
|
||||
- `bunx bun@1.3.5 run test:fast`
|
||||
- `bunx bun@1.3.5 run test:env`
|
||||
- `bunx bun@1.3.5 run build`
|
||||
- `bunx bun@1.3.5 run docs:test`
|
||||
- `bunx bun@1.3.5 run docs:build`
|
||||
|
||||
Notes:
|
||||
- The local `bun` binary is `1.3.11`, which tripped Bun's nested `node:test` handling in `test:fast`; rerunning with the repo-pinned `bun@1.3.5` cleared the issue.
|
||||
- No README content change was necessary for this cut.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Prepared the `v0.10.0` release cut locally. Bumped `package.json`, generated committed root changelog and release notes, updated the public docs changelog summary, and verified the release gate with the repo-pinned Bun `1.3.5` runtime. The release prep is green and ready for tagging/publishing when desired.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,55 @@
|
||||
---
|
||||
id: TASK-246
|
||||
title: Migrate Discord Rich Presence to maintained RPC wrapper
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 08:17'
|
||||
updated_date: '2026-03-29 08:22'
|
||||
labels:
|
||||
- dependency
|
||||
- discord
|
||||
- presence
|
||||
dependencies: []
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Replace the deprecated Discord Rich Presence wrapper with a maintained JavaScript alternative while preserving the current IPC-based presence behavior in the Electron main process.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 The app no longer depends on `discord-rpc`
|
||||
- [x] #2 Discord Rich Presence still logs in and publishes activity updates from the main process
|
||||
- [x] #3 Existing Discord presence tests continue to pass or are updated to cover the new client API
|
||||
- [x] #4 The change is documented in the release notes or changelog fragment
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Completed:
|
||||
- Swapped the app's Discord RPC dependency from `discord-rpc` to `@xhayper/discord-rpc`.
|
||||
- Extracted the client adapter into `src/main/runtime/discord-rpc-client.ts` so the main process can keep using a small wrapper around the maintained library.
|
||||
- Added `src/main/runtime/discord-rpc-client.test.ts` to verify the adapter forwards login/activity/clear/destroy calls through `client.user`.
|
||||
- Documented the dependency swap in `CHANGELOG.md`, `release/release-notes.md`, and `docs-site/changelog.md`.
|
||||
|
||||
Verification:
|
||||
- `bunx bun@1.3.5 test src/main/runtime/discord-rpc-client.test.ts src/core/services/discord-presence.test.ts`
|
||||
- `bunx bun@1.3.5 run changelog:lint`
|
||||
- `bunx bun@1.3.5 run changelog:check --version 0.10.0`
|
||||
- `bunx bun@1.3.5 run docs:test`
|
||||
- `bunx bun@1.3.5 run docs:build`
|
||||
|
||||
Notes:
|
||||
- The existing release prep artifacts for v0.10.0 were kept intact and updated in place.
|
||||
- No README change was needed for this dependency swap.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Replaced the deprecated `discord-rpc` dependency with the maintained `@xhayper/discord-rpc` wrapper while preserving the main-process rich presence flow. Added a focused runtime wrapper test, kept the existing Discord presence service tests green, and documented the dependency swap in the release notes and changelog.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,60 @@
|
||||
---
|
||||
id: TASK-247
|
||||
title: Strip inline subtitle markup from subtitle sidebar cues
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-29 10:01'
|
||||
updated_date: '2026-03-29 10:10'
|
||||
labels: []
|
||||
dependencies: []
|
||||
references:
|
||||
- src/core/services/subtitle-cue-parser.ts
|
||||
- src/renderer/modals/subtitle-sidebar.ts
|
||||
- src/core/services/subtitle-cue-parser.test.ts
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Subtitle sidebar should display readable subtitle text when loaded subtitle files include inline markup such as HTML-like font tags. Parsed cue text currently preserves markup, causing raw tags to appear in the sidebar instead of clean subtitle content.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Subtitle sidebar cue text omits inline subtitle markup such as HTML-like font tags while preserving visible subtitle content.
|
||||
- [x] #2 Parsed subtitle cues used by the sidebar keep timing order and expected line-break behavior after markup sanitization.
|
||||
- [x] #3 Regression tests cover markup-bearing subtitle cue parsing so raw tags do not reappear in the sidebar.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Add regression tests in src/core/services/subtitle-cue-parser.test.ts for subtitle cues containing HTML-like font tags, including multi-line content.
|
||||
2. Verify the new parser test fails against current behavior to confirm the bug is covered.
|
||||
3. Update src/core/services/subtitle-cue-parser.ts to sanitize inline subtitle markup while preserving visible text and expected newline handling.
|
||||
4. Re-run focused parser tests, then run broader verification commands required for handoff as practical.
|
||||
5. Update task notes/acceptance criteria based on verified results and finalize the task record.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
User approved implementation on 2026-03-29.
|
||||
|
||||
Implemented parser-level subtitle cue sanitization for HTML-like tags so loaded sidebar cues render readable text while preserving cue line breaks.
|
||||
|
||||
Added regression coverage for SRT and ASS cue parsing with <font ...> markup.
|
||||
|
||||
Verification: bun test src/core/services/subtitle-cue-parser.test.ts; bun run typecheck; bun run test:fast; bun run test:env; bun run build; bun run test:smoke:dist.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Sanitized parsed subtitle cue text in src/core/services/subtitle-cue-parser.ts so HTML-like inline markup such as <font ...> is removed before cues reach the subtitle sidebar. The sanitizer is shared across SRT/VTT-style parsing and ASS parsing, while existing cue timing and line-break semantics remain intact.
|
||||
|
||||
Added regression tests in src/core/services/subtitle-cue-parser.test.ts covering markup-bearing SRT lines and ASS dialogue lines with \N breaks, and verified the original failure before implementing the fix.
|
||||
|
||||
Tests run: bun test src/core/services/subtitle-cue-parser.test.ts; bun run typecheck; bun run test:fast; bun run test:env; bun run build; bun run test:smoke:dist.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,69 @@
|
||||
---
|
||||
id: TASK-248
|
||||
title: Fix macOS visible overlay toggle getting immediately restored
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 10:03'
|
||||
updated_date: '2026-03-29 22:14'
|
||||
labels: []
|
||||
dependencies: []
|
||||
references:
|
||||
- /Users/sudacode/projects/japanese/SubMiner/plugin/subminer/process.lua
|
||||
- /Users/sudacode/projects/japanese/SubMiner/plugin/subminer/ui.lua
|
||||
- /Users/sudacode/projects/japanese/SubMiner/src/core/services/cli-command.ts
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/main/overlay-visibility-runtime.ts
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Investigate and fix the visible overlay toggle path on macOS so the user can reliably hide the overlay after it has been shown. The current behavior can ignore the toggle or hide the overlay briefly before it is restored immediately.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Pressing the visible-overlay toggle hides the overlay when it is currently shown on macOS.
|
||||
- [x] #2 A manual hide is not immediately undone by startup or readiness flows.
|
||||
- [x] #3 The mpv/plugin toggle path matches the intended visible-overlay toggle behavior.
|
||||
- [x] #4 Regression tests cover the failing toggle path.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Reproduce the toggle/re-show logic from code paths around mpv plugin control commands and auto-play readiness.
|
||||
2. Add regression coverage for manual toggle-off staying hidden through readiness completion.
|
||||
3. Patch the plugin/control path so manual visible-overlay toggles are not undone by readiness auto-show.
|
||||
4. Run targeted tests, then the relevant verification lane.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Root cause: the mpv plugin readiness callback (`subminer-autoplay-ready`) could re-issue `--show-visible-overlay` after a manual toggle/hide. Initial fix only suppressed the next readiness restore, but repeated readiness callbacks in the same media session could still re-show the overlay. The plugin toggle path also still used legacy `--toggle` instead of the explicit visible-overlay command.
|
||||
|
||||
Implemented a session-scoped suppression flag in the Lua plugin so a manual hide/toggle during the pause-until-ready window blocks readiness auto-show for the rest of the current auto-start session, then resets on the next auto-start session.
|
||||
|
||||
Added Lua regression coverage for both behaviors: manual toggle-off stays hidden through readiness completion, repeated readiness callbacks in the same session stay suppressed, and `subminer-toggle` emits `--toggle-visible-overlay` rather than legacy `--toggle`.
|
||||
|
||||
Follow-up investigation found a second issue in `src/core/services/cli-command.ts`: pure visible-overlay toggle commands still ran the MPV connect/start path (`connectMpvClient`) because `--toggle` and `--toggle-visible-overlay` were classified as start-like commands. That side effect could retrigger startup visibility work even after the plugin-side fix.
|
||||
|
||||
Updated CLI command handling so only `--start` reconnects MPV. Pure toggle/show/hide overlay commands still initialize overlay runtime when needed, but they no longer restart/reconnect the MPV control path.
|
||||
|
||||
Renderer/modal follow-ups: restored focused-overlay mpv y-chord proxy in `src/renderer/handlers/keyboard.ts`, added a modal-close guard in `src/main/overlay-runtime.ts` so modal teardown does not re-show a manually hidden overlay, and added a duplicate-toggle debounce in `src/main/runtime/overlay-visibility-actions.ts` to ignore near-simultaneous toggle requests inside the main process.
|
||||
|
||||
2026-03-29: added regression for repeated subminer-autoplay-ready signals after manual y-t hide. Root cause: Lua plugin suppression only blocked the first ready-time restore, so later ready callbacks in the same media session could re-show the visible overlay. Updated plugin suppression to remain active for the full current auto-start session and reset on the next auto-start trigger.
|
||||
|
||||
2026-03-29: live mpv log showed repeated `subminer-autoplay-ready` script messages from Electron during paused startup, each triggering plugin `--show-visible-overlay` and immediate re-show. Fixed `src/main/runtime/autoplay-ready-gate.ts` so plugin readiness is signaled once per media while paused retry loops only re-issue `pause=false` instead of re-signaling readiness.
|
||||
|
||||
2026-03-29: Added window-level guard for stray visible-overlay re-show on macOS. `src/core/services/overlay-window.ts` now immediately re-hides the visible overlay window on `show` if overlay state is false, covering native/Electron re-show paths that bypass normal visibility actions. Regression: `src/core/services/overlay-window.test.ts`. Verified with full gate and rebuilt unsigned mac bundle.
|
||||
|
||||
2026-03-29: added a blur-path guard for the visible overlay window. `src/core/services/overlay-window.ts` now skips topmost restacking when a visible-overlay blur fires after overlay state already flipped off, covering a macOS hide-in-flight path that could immediately reassert the window. Regression coverage added in `src/core/services/overlay-window.test.ts`; verified with targeted overlay tests, full gate, and rebuilt unsigned mac bundle.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Confirmed with user that macOS `y-t` now works. Cleaned the patch set down to the remaining justified fixes: explicit visible-overlay plugin toggle/suppression, pure-toggle CLI no longer reconnects MPV, autoplay-ready signaling only fires once per media, and the final visible-overlay blur guard that stops macOS restacking after a manual hide. Full gate passed again before commit `c939c580` (`fix: stabilize macOS visible overlay toggle`).
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,37 @@
|
||||
---
|
||||
id: TASK-249
|
||||
title: Fix AniList token persistence on setup login
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 10:08'
|
||||
updated_date: '2026-03-29 19:42'
|
||||
labels:
|
||||
- anilist
|
||||
- bug
|
||||
dependencies: []
|
||||
documentation:
|
||||
- src/main/runtime/anilist-setup.ts
|
||||
- src/core/services/anilist/anilist-token-store.ts
|
||||
- src/main/runtime/anilist-token-refresh.ts
|
||||
- docs-site/anilist-integration.md
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
AniList setup can appear successful but the token is not persisted across restarts. Investigate the setup callback and token store path so the app either saves the token reliably or surfaces persistence failure instead of reopening setup on every launch.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 AniList setup login persists a usable token across app restarts when safeStorage works
|
||||
- [ ] #2 If token persistence fails the setup flow reports the failure instead of pretending login succeeded
|
||||
- [ ] #3 Regression coverage exists for the callback/save path and the refresh path that reopens setup when no token is available
|
||||
<!-- AC:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Pinned installed mpv plugin configs to the current SubMiner binary so standalone mpv launches reuse the same app identity that saved AniList tokens. Added startup self-heal for existing blank binary_path configs, install-time binary_path writes for fresh plugin installs, regression tests for both paths, and docs updates describing the new behavior.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,72 @@
|
||||
---
|
||||
id: TASK-250
|
||||
title: Restore macOS mpv passthrough while overlay subtitle sidebar is open
|
||||
status: Done
|
||||
assignee:
|
||||
- '@codex'
|
||||
created_date: '2026-03-29 10:10'
|
||||
updated_date: '2026-03-29 10:23'
|
||||
labels:
|
||||
- bug
|
||||
- macos
|
||||
- subtitle-sidebar
|
||||
- overlay
|
||||
- mpv
|
||||
dependencies: []
|
||||
references:
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/renderer/overlay-mouse-ignore.ts
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/renderer/modals/subtitle-sidebar.ts
|
||||
- /Users/sudacode/projects/japanese/SubMiner/src/renderer/handlers/keyboard.ts
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/renderer/modals/subtitle-sidebar.test.ts
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/renderer/overlay-mouse-ignore.test.ts
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
When the overlay-layout subtitle sidebar is open on macOS, users should still be able to click through outside the sidebar and return keyboard focus to mpv so native mpv keybindings continue to work. The sidebar should stay interactive when hovered or focused, but it must not make the whole visible overlay behave like a blocking modal.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Opening the overlay-layout subtitle sidebar does not keep the entire visible overlay mouse-interactive outside sidebar hover or focus.
|
||||
- [x] #2 With the subtitle sidebar open, clicking outside the sidebar can refocus mpv so native mpv keybindings continue to work.
|
||||
- [x] #3 Focused regression coverage exists for overlay-layout sidebar passthrough behavior on mouse-ignore state changes.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Add renderer regression coverage for overlay-layout subtitle sidebar passthrough so open-but-unhovered sidebar no longer holds global mouse interaction.
|
||||
2. Update overlay mouse-ignore gating to keep the subtitle sidebar interactive only while hovered or otherwise actively interacting, instead of treating overlay layout as a blocking modal.
|
||||
3. Run focused renderer tests for subtitle sidebar and mouse-ignore behavior, then update task notes/criteria with the verified outcome.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Confirmed the regression only affects the default overlay-layout subtitle sidebar: open sidebar state was treated as a blocking overlay modal, which prevented click-through outside the sidebar and stranded native mpv keybindings until focus was manually recovered.
|
||||
|
||||
Added a failing regression in src/renderer/modals/subtitle-sidebar.test.ts for overlay-layout passthrough before changing the gate.
|
||||
|
||||
Verification: bun test src/renderer/modals/subtitle-sidebar.test.ts src/renderer/overlay-mouse-ignore.test.ts; bun run typecheck
|
||||
|
||||
User reported the first renderer-only fix did not resolve the macOS issue in practice. Reopening investigation to trace visible-overlay window focus and hit-testing outside the renderer mouse-ignore gate.
|
||||
|
||||
Follow-up root cause: sidebar hover handlers were attached to the full-screen `.subtitle-sidebar-modal` shell instead of the actual sidebar panel. On the transparent visible overlay that shell spans the viewport, so sidebar-active state could persist outside the panel and keep the overlay interactive longer than intended.
|
||||
|
||||
Updated the sidebar modal to track hover/focus on `subtitleSidebarContent` and derive sidebar interaction state from panel hover or focus-within before recomputing mouse passthrough.
|
||||
|
||||
Verification refresh: bun test src/renderer/modals/subtitle-sidebar.test.ts src/renderer/overlay-mouse-ignore.test.ts; bun run typecheck
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Restored overlay subtitle sidebar passthrough in two layers. First, the visible overlay mouse-ignore gate no longer treats the subtitle sidebar as a global blocking modal. Second, the sidebar panel now tracks interaction on the real sidebar content instead of the full-screen modal shell, and keeps itself active only while the panel is hovered or focused. Added regressions for overlay-layout passthrough and focus-within behavior. Verification: `bun test src/renderer/modals/subtitle-sidebar.test.ts src/renderer/overlay-mouse-ignore.test.ts` and `bun run typecheck`.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,32 @@
|
||||
---
|
||||
id: TASK-251
|
||||
title: 'Docs: add subtitle sidebar and Jimaku integration pages'
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 22:36'
|
||||
updated_date: '2026-03-29 22:38'
|
||||
labels:
|
||||
- docs
|
||||
dependencies: []
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Track the docs-site update that adds a dedicated subtitle sidebar page, links Jimaku integration from the homepage/config docs, and refreshes the docs-site theme styling used by those pages.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 docs-site nav includes a Subtitle Sidebar entry
|
||||
- [x] #2 Subtitle Sidebar page documents layout, shortcut, and config options
|
||||
- [x] #3 Jimaku integration page and configuration docs link to the new docs page
|
||||
- [x] #4 Changelog fragment exists for the user-visible docs release note
|
||||
<!-- AC:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Added the subtitle sidebar docs page and nav entry, linked Jimaku integration from the homepage/config docs, refreshed docs-site styling tokens, and recorded the release note fragment. Verified with `bun run changelog:lint`, `bun run docs:test`, `bun run docs:build`, and `bun run build`. Full repo test gate still has pre-existing failures in `bun run test:fast` and `bun run test:env` unrelated to these docs changes.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,35 @@
|
||||
---
|
||||
id: TASK-252
|
||||
title: Harden AUR publish release step against transient SSH failures
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-29 23:46'
|
||||
updated_date: '2026-03-29 23:49'
|
||||
labels:
|
||||
- release
|
||||
- ci
|
||||
- aur
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Make tagged releases resilient when the automated AUR update hits transient SSH disconnects from GitHub-hosted runners. The GitHub Release should still complete successfully, while AUR publish should retry a few times and downgrade persistent AUR failures to warnings instead of failing the entire release workflow.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Tagged release workflow retries the AUR clone/push path with bounded backoff when AUR SSH disconnects transiently.
|
||||
- [x] #2 Persistent AUR publish failure does not fail the overall tagged release workflow or block GitHub Release publication.
|
||||
- [x] #3 Release documentation notes that AUR publish is best-effort and may need manual follow-up when retries are exhausted.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Updated .github/workflows/release.yml so AUR secret/configure/clone/push failures downgrade to warnings, clone/push retry three times with linear backoff, and the GitHub Release path remains green.
|
||||
|
||||
Documented AUR publish as best-effort in docs/RELEASING.md and added changes/253-aur-release-best-effort.md for PR changelog compliance.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
@@ -0,0 +1,68 @@
|
||||
---
|
||||
id: TASK-253
|
||||
title: Fix animated AVIF lead-in alignment with sentence audio
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-30 01:59'
|
||||
updated_date: '2026-03-30 02:03'
|
||||
labels: []
|
||||
dependencies: []
|
||||
references:
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/anki-integration/animated-image-sync.ts
|
||||
- /Users/sudacode/projects/japanese/SubMiner/src/anki-integration.ts
|
||||
- /Users/sudacode/projects/japanese/SubMiner/src/core/services/stats-server.ts
|
||||
- /Users/sudacode/projects/japanese/SubMiner/src/media-generator.ts
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Animated AVIF cards currently freeze only for the existing word-audio duration. Because generated sentence audio starts with configured audio padding before the spoken subtitle begins, animation motion can begin early instead of lining up with the spoken sentence. Update the shared lead-in calculation so animated motion begins when sentence speech begins after the chosen word audio finishes.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Animated AVIF lead-in calculation includes both the chosen word-audio duration and the generated sentence-audio start offset so motion begins with spoken sentence audio
|
||||
- [x] #2 Shared animated-image sync behavior is applied consistently across the Anki note update, card creation, and stats server media-generation paths
|
||||
- [x] #3 Regression tests cover the corrected lead-in timing calculation and fail before the fix
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
Approved plan:
|
||||
1. Add a failing unit test proving animated-image lead-in must include sentence-audio start offset in addition to chosen word-audio duration.
|
||||
2. Update shared animated-image lead-in resolution to add the configured sentence-audio offset used by generated sentence audio.
|
||||
3. Thread the shared calculation through note update, card creation, and stats-server generation paths without duplicating timing logic.
|
||||
4. Run targeted tests first, then the relevant fast verification lane for touched files.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
User approved implementation on 2026-03-29 local time. Root cause: lead-in omitted sentence-audio padding offset, so AVIF motion began before spoken sentence audio.
|
||||
|
||||
Implemented shared animated-image lead-in fix in src/anki-integration/animated-image-sync.ts by adding the same sentence-audio start offset used by generated audio (`audioPadding`) after summing the chosen word-audio durations.
|
||||
|
||||
Added regression coverage in src/anki-integration/animated-image-sync.test.ts for explicit `audioPadding` lead-in alignment and kept the zero-padding case covered.
|
||||
|
||||
Verification passed: `bun test src/anki-integration/animated-image-sync.test.ts src/anki-integration/note-update-workflow.test.ts src/media-generator.test.ts`, `bun run typecheck`, `bun run test:fast`, `bun run test:env`, `bun run build`, `bun run test:smoke:dist`.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Fixed animated AVIF lead-in alignment so motion starts when the spoken sentence starts, not at the padded beginning of the generated sentence-audio clip. The shared resolver in `src/anki-integration/animated-image-sync.ts` now adds the configured/default `audioPadding` offset after summing the selected word-audio durations, which keeps note update, card creation, and stats-server generation paths aligned through the same logic.
|
||||
|
||||
Added regression coverage in `src/anki-integration/animated-image-sync.test.ts` for both zero-padding and explicit padding cases to prove the lead-in math matches sentence-audio timing.
|
||||
|
||||
Verification:
|
||||
- `bun test src/anki-integration/animated-image-sync.test.ts src/anki-integration/note-update-workflow.test.ts src/media-generator.test.ts`
|
||||
- `bun run typecheck`
|
||||
- `bun run test:fast`
|
||||
- `bun run test:env`
|
||||
- `bun run build`
|
||||
- `bun run test:smoke:dist`
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,59 @@
|
||||
---
|
||||
id: TASK-254
|
||||
title: Fix AniList token persistence when safe storage is unavailable
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-30 02:10'
|
||||
updated_date: '2026-03-30 02:20'
|
||||
labels:
|
||||
- bug
|
||||
- anilist
|
||||
dependencies: []
|
||||
references:
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/core/services/anilist/anilist-token-store.ts
|
||||
- /Users/sudacode/projects/japanese/SubMiner/src/main/runtime/anilist-setup.ts
|
||||
- >-
|
||||
/Users/sudacode/projects/japanese/SubMiner/src/main/runtime/anilist-token-refresh.ts
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
AniList login currently appears to succeed during setup, but some environments cannot persist the token because Electron safeStorage is unavailable or unusable. On the next app start, AniList tracking cannot load the token and re-prompts the user to set up AniList again. Align AniList token persistence with the intended login UX so a token the user already saved is reused on later sessions.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Saved encrypted AniList token is reused on app-ready startup without reopening setup.
|
||||
- [x] #2 AniList startup no longer attempts to open the setup BrowserWindow before Electron is ready.
|
||||
- [x] #3 AniList auth/runtime tests cover stored-token reuse and the missing-token startup path that previously triggered pre-ready setup attempts.
|
||||
- [x] #4 AniList token storage remains encrypted-only; no plaintext fallback is introduced.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Add regression tests for AniList startup auth refresh so a stored encrypted token is reused without opening setup, and for the missing-token path so setup opening is deferred safely until the app can actually show a window.
|
||||
2. Update AniList startup/auth runtime to separate token resolution from setup-window prompting, and gate prompting on app readiness instead of attempting BrowserWindow creation during early startup.
|
||||
3. Preserve encrypted-only storage semantics in anilist-token-store; do not add plaintext fallback. If stored-token load fails, keep logging/diagnostics intact.
|
||||
4. Run targeted AniList runtime/token tests, then summarize root cause and verification results.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Investigated AniList auth persistence flow. Current setup path treats callback token as saved even when anilist-token-store refuses persistence because safeStorage is unavailable. Jellyfin token store already uses plaintext fallback in this environment class, which is a likely model for the AniList fix.
|
||||
|
||||
Confirmed from local logs that safeStorage was explicitly unavailable on 2026-03-23 due to a macOS Keychain lookup failure with NSOSStatusErrorDomain Code=-128 userCanceledErr. Current environment also has an encrypted AniList token file at /Users/sudacode/.config/SubMiner/anilist-token-store.json updated 2026-03-29 18:49, so safeStorage did work recently for save. Repeated AniList setup prompts on 2026-03-29/30 correlate more strongly with startup auth flow deciding no token is available and opening setup immediately; logs show repeated 'Loaded AniList manual token entry page' and several 'Failed to refresh AniList client secret state during startup' errors with 'Cannot create BrowserWindow before app is ready'. No recent log lines indicate safeStorage.isEncryptionAvailable() false after 2026-03-23.
|
||||
|
||||
Implemented encrypted-only startup fix by adding an allowSetupPrompt control to AniList token refresh and disabling setup-window prompting for the early pre-ready startup refresh in main.ts. App-ready reloadConfig still performs the normal prompt-capable refresh after Electron is ready. Added regression tests for stored-token reuse and prompt suppression when startup explicitly disables prompting.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Root cause was a redundant early AniList auth refresh during startup. The app refreshed AniList auth once before Electron was ready and again during app-ready config reload. When the early refresh could not resolve a token, it tried to open the AniList setup window immediately, which produced the observed 'Cannot create BrowserWindow before app is ready' failures and repeated setup prompts. The fix keeps token storage encrypted-only, teaches AniList auth refresh to optionally suppress setup-window prompting, and uses that suppression for the early startup refresh. App-ready startup still performs the normal prompt-capable refresh once Electron is ready, so saved encrypted tokens are reused without reopening setup and missing-token setup only happens at a safe point. Verified with targeted AniList auth tests, typecheck, test:fast, test:env, build, and test:smoke:dist.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,61 @@
|
||||
---
|
||||
id: TASK-255
|
||||
title: Add overlay playlist browser modal for sibling video files and mpv queue
|
||||
status: In Progress
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-30 05:46'
|
||||
updated_date: '2026-03-30 08:34'
|
||||
labels:
|
||||
- feature
|
||||
- overlay
|
||||
- mpv
|
||||
- launcher
|
||||
dependencies: []
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Add an in-session overlay modal that opens from a keybinding during active playback and lets the user browse video files from the current file's parent directory alongside the active mpv playlist. The modal should sort local files in best-effort episode order, highlight the current item, and allow keyboard/mouse interaction to add files into the mpv queue, remove queued items, and reorder queued items without leaving playback.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [ ] #1 An overlay modal can be opened during active playback from a dedicated keybinding and closed without disrupting existing modal behavior.
|
||||
- [ ] #2 The modal shows video files from the current media file's parent directory in best-effort episode order and highlights the current file when present.
|
||||
- [ ] #3 The modal shows the active mpv playlist/queue with enough metadata to identify the current item and queued order.
|
||||
- [ ] #4 The user can add a directory file to the mpv playlist, remove playlist items, and reorder playlist items from the modal using both mouse and keyboard interactions.
|
||||
- [ ] #5 Modal state stays in sync after playlist mutations so the rendered queue reflects mpv's current playlist order.
|
||||
- [ ] #6 Feature coverage includes automated tests for ordering/playlist behavior and docs or shortcut/help updates for the new modal.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Add playlist-browser domain types, IPC channels, overlay modal registration, special command, and default keybinding for Ctrl+Alt+P.
|
||||
2. Write failing tests for best-effort episode sorting and main playlist-browser runtime snapshot/mutation behavior.
|
||||
3. Implement playlist-browser main/runtime helpers for local sibling video discovery, mpv playlist normalization, and append/play/remove/move operations with refreshed snapshots.
|
||||
4. Wire preload and main-process IPC handlers that expose snapshot and mutation methods to the renderer.
|
||||
5. Write failing renderer and keyboard tests for modal open/close, split-pane interaction, keyboard controls, and degraded states.
|
||||
6. Implement playlist-browser modal markup, DOM/state, renderer composition, keyboard routing, and session-help labeling.
|
||||
7. Run targeted test lanes first, then the maintained verification gate relevant to the touched surfaces; update task notes/criteria as checks pass.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Implemented overlay playlist browser modal with split directory/playlist panes, Ctrl+Alt+P keybinding, main/preload IPC, mpv queue mutations, and best-effort sibling episode sorting.
|
||||
|
||||
Added tests for sort/runtime logic, IPC wiring, keyboard routing, and playlist-browser modal behavior.
|
||||
|
||||
Verification: `bun run typecheck` passed; targeted playlist-browser and IPC tests passed; `bun run build` passed; `bun run test:smoke:dist` passed.
|
||||
|
||||
Repo gate blockers outside this feature: `bun run test:fast` hits existing Bun `node:test` NotImplementedError cases plus unrelated immersion-tracker failures; `bun run test:env` fails in existing immersion-tracker sqlite tests.
|
||||
|
||||
2026-03-30: Fixed playlist-browser local playback regression where subtitle track IDs leaked across episode jumps. `playPlaylistBrowserIndexRuntime` now reapplies local subtitle auto-selection defaults (`sub-auto=fuzzy`, `sid=auto`, `secondary-sid=auto`) before `playlist-play-index` for local filesystem targets only; remote playlist entries remain untouched. Added runtime regression tests for both paths.
|
||||
|
||||
2026-03-30: Follow-up subtitle regression fix. Pre-jump `sid=auto` was ineffective because mpv resolved it against the current episode before `playlist-play-index`. Local playlist jumps now set `sub-auto=fuzzy`, switch episodes, then schedule a delayed rearm of `sid=auto` and `secondary-sid=auto` so selection happens against the new file's tracks. Added failing-first runtime coverage for delayed local rearm and remote no-op behavior.
|
||||
|
||||
2026-03-30: Cleaned up playlist-browser runtime local-play subtitle-rearm flow by extracting focused helpers without changing behavior. Added public docs/readme coverage for the default `Ctrl+Alt+P` playlist browser keybinding and modal, plus changelog fragment `changes/260-playlist-browser.md`. Verification: `bun test src/main/runtime/playlist-browser-runtime.test.ts`, `bun run typecheck`, `bun run docs:test`, `bun run docs:build`, `bun run changelog:lint`, `bun run build`.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
@@ -0,0 +1,56 @@
|
||||
---
|
||||
id: TASK-256
|
||||
title: Fix texthooker page live websocket connect/send regression
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-30 06:04'
|
||||
updated_date: '2026-03-30 06:12'
|
||||
labels:
|
||||
- bug
|
||||
- texthooker
|
||||
- websocket
|
||||
dependencies: []
|
||||
priority: medium
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Investigate why the bundled texthooker page loads at the local HTTP endpoint but does not reliably connect to the configured websocket feed or receive/display live subtitle lines. Identify the regression in the SubMiner startup/bootstrap or vendored texthooker client path, restore live line delivery, and cover the fix with focused regression tests and any required docs updates.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Bundled texthooker connects to the intended websocket endpoint on launch using the configured/default SubMiner startup path.
|
||||
- [x] #2 Incoming subtitle or annotation websocket messages are accepted by the bundled texthooker and rendered as live lines.
|
||||
- [x] #3 Regression coverage fails before the fix and passes after the fix for the identified breakage.
|
||||
- [x] #4 Relevant docs/config notes are updated if user-facing behavior or troubleshooting guidance changes.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Add a focused CLI regression test covering `--texthooker` startup when the runtime has a resolved websocket URL, proving the handler currently starts texthooker without that URL.
|
||||
2. Extend CLI texthooker dependencies/runtime wiring so the handler can retrieve the resolved texthooker websocket URL from current config/runtime state.
|
||||
3. Update the CLI texthooker flow to pass the resolved websocket URL into texthooker startup instead of starting the HTTP server with only a port.
|
||||
4. Run focused tests for CLI command handling and texthooker bootstrap behavior; update task notes/final summary with the verified root cause and fix.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Root cause: the CLI `--texthooker` path started the HTTP texthooker server with only the port, so the served page never received `bannou-texthooker-websocketUrl` and fell back to the vendored default `ws://localhost:6677`. In environments where the regular websocket was skipped or the annotation websocket should have been used, the page stayed on `Connecting...` and never received lines.
|
||||
|
||||
Fix: added a shared `resolveTexthookerWebsocketUrl(...)` helper for websocket selection, reused it in both app-ready startup and CLI texthooker context wiring, and threaded the resolved websocket URL through `handleCliCommand` into `Texthooker.start(...)`.
|
||||
|
||||
Verification: `bun run typecheck`; focused Bun tests for texthooker bootstrap, startup, CLI command handling, and CLI context wiring; browser-level repro against a throwaway source-backed texthooker server confirmed the page bootstraps `ws://127.0.0.1:6678`, connects successfully, and renders live sample lines (`テスト一`, `テスト二`).
|
||||
|
||||
Docs: no user-facing behavior change beyond restoring the intended existing behavior, so no docs update was required.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Restored texthooker live line delivery for the CLI/startup path that launched the page without a resolved websocket URL. Shared websocket URL resolution between app-ready startup and CLI texthooker context, forwarded that URL into `Texthooker.start(...)`, added regression coverage for the CLI path, and verified both by focused tests and a browser-level throwaway server that connected on `ws://127.0.0.1:6678` and rendered live sample lines.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
id: TASK-257
|
||||
title: Fix texthooker-only mode startup to initialize websocket pipeline
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-30 06:15'
|
||||
updated_date: '2026-03-30 06:17'
|
||||
labels:
|
||||
- bug
|
||||
- texthooker
|
||||
- websocket
|
||||
- startup
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Investigate and fix `--texthooker` / `subminer texthooker` startup so it launches the texthooker page without the overlay window but still initializes the runtime pieces required for live subtitle delivery. Today texthooker-only mode serves the page yet skips mpv client and websocket startup, leaving the page pointed at `ws://127.0.0.1:6678` with no listener behind it.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 `--texthooker` mode starts the texthooker page without opening the overlay window and still initializes the websocket path needed for live subtitle delivery.
|
||||
- [x] #2 Texthooker-only startup creates the mpv/websocket runtime needed for the configured annotation or subtitle websocket feed.
|
||||
- [x] #3 Regression coverage fails before the fix and passes after the fix for texthooker-only startup.
|
||||
- [x] #4 Docs/help text remain accurate for texthooker-only behavior; update docs only if wording needs correction.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Replace the existing texthooker-only startup regression test so it asserts websocket/mpv startup still happens while overlay window initialization stays skipped.
|
||||
2. Remove or narrow the early texthooker-only short-circuit in app-ready startup so runtime config, mpv client, subtitle websocket, and annotation websocket still initialize.
|
||||
3. Run focused tests plus a local process check proving `--texthooker` now opens the websocket listener expected by the served page.
|
||||
4. Update task notes/final summary with the live-process root cause (`--texthooker` serving HTML on 5174 with no 6678 listener).
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Live-process repro on the user's machine: `ps` showed the active process as `/tmp/.mount_SubMin.../SubMiner --texthooker --port 5174`. `lsof` showed 5174 listening but no listener on 6678/6677, while `curl http://127.0.0.1:5174/` confirmed the served page was correctly bootstrapped to `ws://127.0.0.1:6678`. That proved the remaining failure was startup mode, not page injection.
|
||||
|
||||
Root cause: `runAppReadyRuntime(...)` had an early `texthookerOnlyMode` return that reloaded config and handled initial args, but skipped `createMpvClient()`, subtitle websocket startup, annotation websocket startup, subtitle timing tracker creation, and the later texthooker-only branch that only skips the overlay window.
|
||||
|
||||
Fix: removed the early texthooker-only short-circuit so texthooker-only mode now runs the normal startup pipeline, then falls through to the existing `Texthooker-only mode enabled; skipping overlay window.` branch.
|
||||
|
||||
Verification: `bun run typecheck`; focused Bun tests for app-ready startup, startup bootstrap, CLI texthooker startup, and CLI context wiring. Existing local live-binary repro still reflects the old mounted AppImage until rebuilt/restarted. Current-binary workaround is to launch normal startup / `--start --texthooker` instead of plain `--texthooker`.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Fixed the second texthooker regression: plain `--texthooker` mode was serving the page but skipping mpv/websocket initialization, so the page pointed at `ws://127.0.0.1:6678` with no listener. Removed the early texthooker-only startup return, kept the later overlay-skip behavior, updated the startup regression test to require websocket/mpv initialization in texthooker-only mode, and re-verified with typecheck plus focused test coverage.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
id: TASK-258
|
||||
title: Stop plugin auto-start from spawning separate texthooker helper
|
||||
status: Done
|
||||
assignee:
|
||||
- codex
|
||||
created_date: '2026-03-30 06:25'
|
||||
updated_date: '2026-03-30 06:26'
|
||||
labels:
|
||||
- bug
|
||||
- texthooker
|
||||
- launcher
|
||||
- plugin
|
||||
- startup
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Change the mpv/plugin auto-start path so normal SubMiner startup owns texthooker and websocket startup inside the main `--start` app instance. Keep standalone `subminer texthooker` / plain `--texthooker` available for explicit external use, but stop the plugin from spawning a second helper subprocess during regular auto-start.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 Plugin auto-start includes texthooker on the main `--start` command when texthooker is enabled.
|
||||
- [x] #2 Plugin auto-start no longer spawns a separate standalone `--texthooker` helper subprocess during normal startup.
|
||||
- [x] #3 Regression coverage fails before the fix and passes after the fix for the plugin auto-start path.
|
||||
- [x] #4 Standalone external `subminer texthooker` / plain `--texthooker` entrypoints remain available for explicit helper use.
|
||||
<!-- AC:END -->
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
<!-- SECTION:PLAN:BEGIN -->
|
||||
1. Flip the mpv/plugin start-gate regression so enabled texthooker is folded into the main `--start` command and standalone helper subprocesses are rejected.
|
||||
2. Update plugin process command construction so `start` includes `--texthooker` when enabled and the separate helper-launch path becomes a no-op for normal auto-start.
|
||||
3. Run plugin Lua regressions, adjacent launcher tests, and typecheck to verify behavior and preserve explicit standalone `--texthooker` entrypoints.
|
||||
<!-- SECTION:PLAN:END -->
|
||||
|
||||
## Implementation Notes
|
||||
|
||||
<!-- SECTION:NOTES:BEGIN -->
|
||||
Design approved by user: normal in-app startup should own texthooker/websocket; `texthookerOnlyMode` should stay explicit external-only.
|
||||
|
||||
Root cause path: mpv/plugin auto-start in `plugin/subminer/process.lua` launched `binary_path --start ...` and then separately spawned `binary_path --texthooker --port ...`. That created the standalone helper process observed live (`SubMiner --texthooker --port 5174`) instead of relying on the normal app instance.
|
||||
|
||||
Fix: `build_command_args('start', overrides)` now appends `--texthooker` when texthooker is enabled, and the old helper-launch path is reduced to a no-op so normal auto-start remains single-process.
|
||||
|
||||
Verification: `lua scripts/test-plugin-start-gate.lua`, `lua scripts/test-plugin-process-start-retries.lua`, `bun test launcher/mpv.test.ts launcher/commands/playback-command.test.ts launcher/config/args-normalizer.test.ts`, and `bun run typecheck`. Standalone launcher/app entrypoints for explicit `subminer texthooker` / plain `--texthooker` were left untouched.
|
||||
<!-- SECTION:NOTES:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Stopped the mpv/plugin auto-start path from spawning a second standalone texthooker helper. Texthooker now rides on the main `--start` app instance for normal startup, with Lua regressions updated to require `--texthooker` on the main start command and reject separate helper subprocesses. Explicit standalone `subminer texthooker` / plain `--texthooker` entrypoints remain available.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
@@ -0,0 +1,33 @@
|
||||
---
|
||||
id: TASK-259
|
||||
title: Fix integrated --start --texthooker startup skipping texthooker server
|
||||
status: Done
|
||||
assignee: []
|
||||
created_date: '2026-03-30 06:48'
|
||||
updated_date: '2026-03-30 06:56'
|
||||
labels:
|
||||
- bug
|
||||
- texthooker
|
||||
- startup
|
||||
dependencies: []
|
||||
priority: high
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
<!-- SECTION:DESCRIPTION:BEGIN -->
|
||||
Integrated overlay startup with `--start --texthooker` currently takes the minimal-startup path because startup mode flags treat any `args.texthooker` as texthooker-only. That skips app-ready texthooker service startup, so no server binds on port 5174 during normal SubMiner playback launches.
|
||||
<!-- SECTION:DESCRIPTION:END -->
|
||||
|
||||
## Acceptance Criteria
|
||||
<!-- AC:BEGIN -->
|
||||
- [x] #1 `--start --texthooker` uses full app-ready startup instead of minimal texthooker-only startup
|
||||
- [x] #2 Integrated playback launch starts the texthooker server on the configured/default port
|
||||
- [x] #3 Regression tests cover the startup-mode classification and integrated startup behavior
|
||||
<!-- AC:END -->
|
||||
|
||||
## Final Summary
|
||||
|
||||
<!-- SECTION:FINAL_SUMMARY:BEGIN -->
|
||||
Narrowed texthooker-only startup classification so integrated `--start --texthooker` no longer takes the minimal-startup path. Added CLI arg regression coverage, rebuilt the AppImage, installed it to `~/.local/bin/SubMiner.AppImage` with a timestamped backup, restarted against `/tmp/subminer-socket`, and verified listeners on 5174/6677/6678 plus browser connection state `Connected with ws://127.0.0.1:6678`.
|
||||
<!-- SECTION:FINAL_SUMMARY:END -->
|
||||
43
bun.lock
43
bun.lock
@@ -7,10 +7,9 @@
|
||||
"dependencies": {
|
||||
"@fontsource-variable/geist": "^5.2.8",
|
||||
"@fontsource-variable/geist-mono": "^5.2.7",
|
||||
"@hono/node-server": "^1.19.11",
|
||||
"@xhayper/discord-rpc": "^1.3.3",
|
||||
"axios": "^1.13.5",
|
||||
"commander": "^14.0.3",
|
||||
"discord-rpc": "^4.0.1",
|
||||
"hono": "^4.12.7",
|
||||
"jsonc-parser": "^3.3.1",
|
||||
"libsql": "^0.5.22",
|
||||
@@ -38,6 +37,12 @@
|
||||
|
||||
"@develar/schema-utils": ["@develar/schema-utils@2.6.5", "", { "dependencies": { "ajv": "^6.12.0", "ajv-keywords": "^3.4.1" } }, "sha512-0cp4PsWQ/9avqTVMCtZ+GirikIA36ikvjtHweU4/j8yLtgObI0+JUPhYFScgwlteveGB1rt3Cm8UhN04XayDig=="],
|
||||
|
||||
"@discordjs/collection": ["@discordjs/collection@2.1.1", "", {}, "sha512-LiSusze9Tc7qF03sLCujF5iZp7K+vRNEDBZ86FT9aQAv3vxMLihUvKvpsCWiQ2DJq1tVckopKm1rxomgNUc9hg=="],
|
||||
|
||||
"@discordjs/rest": ["@discordjs/rest@2.6.1", "", { "dependencies": { "@discordjs/collection": "^2.1.1", "@discordjs/util": "^1.2.0", "@sapphire/async-queue": "^1.5.3", "@sapphire/snowflake": "^3.5.5", "@vladfrangu/async_event_emitter": "^2.4.6", "discord-api-types": "^0.38.40", "magic-bytes.js": "^1.13.0", "tslib": "^2.6.3", "undici": "6.24.1" } }, "sha512-wwQdgjeaoYFiaG+atbqx6aJDpqW7JHAo0HrQkBTbYzM3/PJ3GweQIpgElNcGZ26DCUOXMyawYd0YF7vtr+fZXg=="],
|
||||
|
||||
"@discordjs/util": ["@discordjs/util@1.2.0", "", { "dependencies": { "discord-api-types": "^0.38.33" } }, "sha512-3LKP7F2+atl9vJFhaBjn4nOaSWahZ/yWjOvA4e5pnXkt2qyXRCHLxoBQy81GFtLGCq7K9lPm9R517M1U+/90Qg=="],
|
||||
|
||||
"@electron/asar": ["@electron/asar@3.4.1", "", { "dependencies": { "commander": "^5.0.0", "glob": "^7.1.6", "minimatch": "^3.0.4" }, "bin": { "asar": "bin/asar.js" } }, "sha512-i4/rNPRS84t0vSRa2HorerGRXWyF4vThfHesw0dmcWHp+cspK743UanA0suA5Q5y8kzY2y6YKrvbIUn69BCAiA=="],
|
||||
|
||||
"@electron/fuses": ["@electron/fuses@1.8.0", "", { "dependencies": { "chalk": "^4.1.1", "fs-extra": "^9.0.1", "minimist": "^1.2.5" }, "bin": { "electron-fuses": "dist/bin.js" } }, "sha512-zx0EIq78WlY/lBb1uXlziZmDZI4ubcCXIMJ4uGjXzZW0nS19TjSPeXPAjzzTmKQlJUZm0SbmZhPKP7tuQ1SsEw=="],
|
||||
@@ -110,8 +115,6 @@
|
||||
|
||||
"@fontsource-variable/geist-mono": ["@fontsource-variable/geist-mono@5.2.7", "", {}, "sha512-ZKlZ5sjtalb2TwXKs400mAGDlt/+2ENLNySPx0wTz3bP3mWARCsUW+rpxzZc7e05d2qGch70pItt3K4qttbIYA=="],
|
||||
|
||||
"@hono/node-server": ["@hono/node-server@1.19.11", "", { "peerDependencies": { "hono": "^4" } }, "sha512-dr8/3zEaB+p0D2n/IUrlPF1HZm586qgJNXK1a9fhg/PzdtkK7Ksd5l312tJX2yBuALqDYBlG20QEbayqPyxn+g=="],
|
||||
|
||||
"@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="],
|
||||
|
||||
"@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="],
|
||||
@@ -146,6 +149,10 @@
|
||||
|
||||
"@pkgjs/parseargs": ["@pkgjs/parseargs@0.11.0", "", {}, "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg=="],
|
||||
|
||||
"@sapphire/async-queue": ["@sapphire/async-queue@1.5.5", "", {}, "sha512-cvGzxbba6sav2zZkH8GPf2oGk9yYoD5qrNWdu9fRehifgnFZJMV+nuy2nON2roRO4yQQ+v7MK/Pktl/HgfsUXg=="],
|
||||
|
||||
"@sapphire/snowflake": ["@sapphire/snowflake@3.5.5", "", {}, "sha512-xzvBr1Q1c4lCe7i6sRnrofxeO1QTP/LKQ6A6qy0iB4x5yfiSfARMEQEghojzTNALDTcv8En04qYNIco9/K9eZQ=="],
|
||||
|
||||
"@sindresorhus/is": ["@sindresorhus/is@4.6.0", "", {}, "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw=="],
|
||||
|
||||
"@szmarczak/http-timer": ["@szmarczak/http-timer@4.0.6", "", { "dependencies": { "defer-to-connect": "^2.0.0" } }, "sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w=="],
|
||||
@@ -174,6 +181,10 @@
|
||||
|
||||
"@types/yauzl": ["@types/yauzl@2.10.3", "", { "dependencies": { "@types/node": "*" } }, "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q=="],
|
||||
|
||||
"@vladfrangu/async_event_emitter": ["@vladfrangu/async_event_emitter@2.4.7", "", {}, "sha512-Xfe6rpCTxSxfbswi/W/Pz7zp1WWSNn4A0eW4mLkQUewCrXXtMj31lCg+iQyTkh/CkusZSq9eDflu7tjEDXUY6g=="],
|
||||
|
||||
"@xhayper/discord-rpc": ["@xhayper/discord-rpc@1.3.3", "", { "dependencies": { "@discordjs/rest": "^2.6.1", "@vladfrangu/async_event_emitter": "^2.4.7", "discord-api-types": "^0.38.42", "ws": "^8.20.0" } }, "sha512-Ih48GHiua7TtZgKO+f0uZPhCeQqb84fY2qUys/oMh8UbUfiUkUJLVCmd/v2AK0/pV33euh0aqSXo7+9LiPSwGw=="],
|
||||
|
||||
"@xmldom/xmldom": ["@xmldom/xmldom@0.8.11", "", {}, "sha512-cQzWCtO6C8TQiYl1ruKNn2U6Ao4o4WBBcbL61yJl84x+j5sOWWFU9X7DpND8XZG3daDppSsigMdfAIl2upQBRw=="],
|
||||
|
||||
"abbrev": ["abbrev@3.0.1", "", {}, "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg=="],
|
||||
@@ -212,8 +223,6 @@
|
||||
|
||||
"base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
|
||||
|
||||
"bindings": ["bindings@1.5.0", "", { "dependencies": { "file-uri-to-path": "1.0.0" } }, "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ=="],
|
||||
|
||||
"bl": ["bl@4.1.0", "", { "dependencies": { "buffer": "^5.5.0", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w=="],
|
||||
|
||||
"boolean": ["boolean@3.2.0", "", {}, "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw=="],
|
||||
@@ -296,7 +305,7 @@
|
||||
|
||||
"dir-compare": ["dir-compare@4.2.0", "", { "dependencies": { "minimatch": "^3.0.5", "p-limit": "^3.1.0 " } }, "sha512-2xMCmOoMrdQIPHdsTawECdNPwlVFB9zGcz3kuhmBO6U3oU+UQjsue0i8ayLKpgBcm+hcXPMVSGUN9d+pvJ6+VQ=="],
|
||||
|
||||
"discord-rpc": ["discord-rpc@4.0.1", "", { "dependencies": { "node-fetch": "^2.6.1", "ws": "^7.3.1" }, "optionalDependencies": { "register-scheme": "github:devsnek/node-register-scheme" } }, "sha512-HOvHpbq5STRZJjQIBzwoKnQ0jHplbEWFWlPDwXXKm/bILh4nzjcg7mNqll0UY7RsjFoaXA7e/oYb/4lvpda2zA=="],
|
||||
"discord-api-types": ["discord-api-types@0.38.43", "", {}, "sha512-sSoBf/nK6m7BGtw65mi+QBuvEWaHE8MMziFLqWL+gT6ME/BLg34dRSVKS3Husx40uU06bvxUc3/X+D9Y6/zAbw=="],
|
||||
|
||||
"dmg-builder": ["dmg-builder@26.8.2", "", { "dependencies": { "app-builder-lib": "26.8.2", "builder-util": "26.8.1", "fs-extra": "^10.1.0", "iconv-lite": "^0.6.2", "js-yaml": "^4.1.0" }, "optionalDependencies": { "dmg-license": "^1.0.11" } }, "sha512-DaWI+p4DOqiFVZFMovdGYammBOyJAiHHFWUTQ0Z7gNc0twfdIN0LvyJ+vFsgZEDR1fjgbpCj690IVtbYIsZObQ=="],
|
||||
|
||||
@@ -362,8 +371,6 @@
|
||||
|
||||
"fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="],
|
||||
|
||||
"file-uri-to-path": ["file-uri-to-path@1.0.0", "", {}, "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw=="],
|
||||
|
||||
"filelist": ["filelist@1.0.6", "", { "dependencies": { "minimatch": "^5.0.1" } }, "sha512-5giy2PkLYY1cP39p17Ech+2xlpTRL9HLspOfEgm0L6CwBXBTgsK5ou0JtzYuepxkaQ/tvhCFIJ5uXo0OrM2DxA=="],
|
||||
|
||||
"follow-redirects": ["follow-redirects@1.15.11", "", {}, "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ=="],
|
||||
@@ -480,6 +487,8 @@
|
||||
|
||||
"lru-cache": ["lru-cache@6.0.0", "", { "dependencies": { "yallist": "^4.0.0" } }, "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA=="],
|
||||
|
||||
"magic-bytes.js": ["magic-bytes.js@1.13.0", "", {}, "sha512-afO2mnxW7GDTXMm5/AoN1WuOcdoKhtgXjIvHmobqTD1grNplhGdv3PFOyjCVmrnOZBIT/gD/koDKpYG+0mvHcg=="],
|
||||
|
||||
"make-fetch-happen": ["make-fetch-happen@14.0.3", "", { "dependencies": { "@npmcli/agent": "^3.0.0", "cacache": "^19.0.1", "http-cache-semantics": "^4.1.1", "minipass": "^7.0.2", "minipass-fetch": "^4.0.0", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", "negotiator": "^1.0.0", "proc-log": "^5.0.0", "promise-retry": "^2.0.1", "ssri": "^12.0.0" } }, "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ=="],
|
||||
|
||||
"matcher": ["matcher@3.0.0", "", { "dependencies": { "escape-string-regexp": "^4.0.0" } }, "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng=="],
|
||||
@@ -526,8 +535,6 @@
|
||||
|
||||
"node-api-version": ["node-api-version@0.2.1", "", { "dependencies": { "semver": "^7.3.5" } }, "sha512-2xP/IGGMmmSQpI1+O/k72jF/ykvZ89JeuKX3TLJAYPDVLUalrshrLHkeVcCCZqG/eEa635cr8IBYzgnDvM2O8Q=="],
|
||||
|
||||
"node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
|
||||
|
||||
"node-gyp": ["node-gyp@11.5.0", "", { "dependencies": { "env-paths": "^2.2.0", "exponential-backoff": "^3.1.1", "graceful-fs": "^4.2.6", "make-fetch-happen": "^14.0.3", "nopt": "^8.0.0", "proc-log": "^5.0.0", "semver": "^7.3.5", "tar": "^7.4.3", "tinyglobby": "^0.2.12", "which": "^5.0.0" }, "bin": { "node-gyp": "bin/node-gyp.js" } }, "sha512-ra7Kvlhxn5V9Slyus0ygMa2h+UqExPqUIkfk7Pc8QTLT956JLSy51uWFwHtIYy0vI8cB4BDhc/S03+880My/LQ=="],
|
||||
|
||||
"nopt": ["nopt@8.1.0", "", { "dependencies": { "abbrev": "^3.0.0" }, "bin": { "nopt": "bin/nopt.js" } }, "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A=="],
|
||||
@@ -590,8 +597,6 @@
|
||||
|
||||
"readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="],
|
||||
|
||||
"register-scheme": ["register-scheme@github:devsnek/node-register-scheme#e7cc9a6", { "dependencies": { "bindings": "^1.3.0", "node-addon-api": "^1.3.0" } }, "devsnek-node-register-scheme-e7cc9a6", "sha512-VwUWN3aKIg/yn7T8axW20Y1+4wGALIQectBmkmwSJfLrCycpVepGP/+KHjXSL/Ga8N1SmewL49kESgIhW7HbWg=="],
|
||||
|
||||
"require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="],
|
||||
|
||||
"resedit": ["resedit@1.7.2", "", { "dependencies": { "pe-library": "^0.4.1" } }, "sha512-vHjcY2MlAITJhC0eRD/Vv8Vlgmu9Sd3LX9zZvtGzU5ZImdTN3+d6e/4mnTyV8vEbyf1sgNIrWxhWlrys52OkEA=="],
|
||||
@@ -676,14 +681,16 @@
|
||||
|
||||
"tmp-promise": ["tmp-promise@3.0.3", "", { "dependencies": { "tmp": "^0.2.0" } }, "sha512-RwM7MoPojPxsOBYnyd2hy0bxtIlVrihNs9pj5SUvY8Zz1sQcQG2tG1hSr8PDxfgEB8RNKDhqbIlroIarSNDNsQ=="],
|
||||
|
||||
"tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],
|
||||
|
||||
"truncate-utf8-bytes": ["truncate-utf8-bytes@1.0.2", "", { "dependencies": { "utf8-byte-length": "^1.0.1" } }, "sha512-95Pu1QXQvruGEhv62XCMO3Mm90GscOCClvrIUwCM0PYOXK3kaF3l3sIHxx71ThJfcbM2O5Au6SO3AWCSEfW4mQ=="],
|
||||
|
||||
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"type-fest": ["type-fest@0.13.1", "", {}, "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg=="],
|
||||
|
||||
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
|
||||
|
||||
"undici": ["undici@6.24.1", "", {}, "sha512-sC+b0tB1whOCzbtlx20fx3WgCXwkW627p4EA9uM+/tNNPkSS+eSEld6pAs9nDv7WbY1UUljBMYPtu9BCOrCWKA=="],
|
||||
|
||||
"undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="],
|
||||
|
||||
"unique-filename": ["unique-filename@4.0.0", "", { "dependencies": { "unique-slug": "^5.0.0" } }, "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ=="],
|
||||
@@ -702,10 +709,6 @@
|
||||
|
||||
"wcwidth": ["wcwidth@1.0.1", "", { "dependencies": { "defaults": "^1.0.3" } }, "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg=="],
|
||||
|
||||
"webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
|
||||
|
||||
"whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
|
||||
|
||||
"which": ["which@5.0.0", "", { "dependencies": { "isexe": "^3.1.1" }, "bin": { "node-which": "bin/which.js" } }, "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ=="],
|
||||
|
||||
"wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
|
||||
@@ -772,8 +775,6 @@
|
||||
|
||||
"cross-spawn/which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="],
|
||||
|
||||
"discord-rpc/ws": ["ws@7.5.10", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": "^5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ=="],
|
||||
|
||||
"electron/@types/node": ["@types/node@22.19.15", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg=="],
|
||||
|
||||
"electron-winstaller/fs-extra": ["fs-extra@7.0.1", "", { "dependencies": { "graceful-fs": "^4.1.2", "jsonfile": "^4.0.0", "universalify": "^0.1.0" } }, "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw=="],
|
||||
|
||||
6
changes/251-docs-site-sidebar.md
Normal file
6
changes/251-docs-site-sidebar.md
Normal file
@@ -0,0 +1,6 @@
|
||||
type: docs
|
||||
area: docs-site
|
||||
|
||||
- Added a dedicated Subtitle Sidebar guide and linked it from the homepage and configuration docs.
|
||||
- Linked Jimaku integration from the homepage to its dedicated docs page.
|
||||
- Refreshed docs-site theme tokens and hover/selection styling for the updated pages.
|
||||
5
changes/252-youtube-playback-socket-path.md
Normal file
5
changes/252-youtube-playback-socket-path.md
Normal file
@@ -0,0 +1,5 @@
|
||||
type: fixed
|
||||
area: main
|
||||
|
||||
- Resolve the YouTube playback socket path lazily so startup honors CLI and config overrides.
|
||||
- Add regression coverage for the lazy socket-path lookup during Windows mpv startup.
|
||||
5
changes/253-aur-release-best-effort.md
Normal file
5
changes/253-aur-release-best-effort.md
Normal file
@@ -0,0 +1,5 @@
|
||||
type: internal
|
||||
area: release
|
||||
|
||||
- Retried AUR clone and push operations in the tagged release workflow.
|
||||
- Kept GitHub Releases green when AUR publish flakes and needs manual follow-up.
|
||||
5
changes/259-texthooker-integrated-startup.md
Normal file
5
changes/259-texthooker-integrated-startup.md
Normal file
@@ -0,0 +1,5 @@
|
||||
type: fixed
|
||||
area: main
|
||||
|
||||
- Keep integrated `--start --texthooker` launches on the full app-ready startup path so the texthooker page and websocket servers start together during normal playback startup.
|
||||
- Stop the mpv/plugin auto-start flow from spawning a separate standalone texthooker helper during normal `subminer <video>` launches.
|
||||
5
changes/260-playlist-browser.md
Normal file
5
changes/260-playlist-browser.md
Normal file
@@ -0,0 +1,5 @@
|
||||
type: added
|
||||
area: overlay
|
||||
|
||||
- Added a playlist browser overlay modal for browsing sibling video files and the live mpv queue during playback.
|
||||
- Added the default `Ctrl+Alt+P` keybinding to open the playlist browser and manage queue order without leaving playback.
|
||||
@@ -498,6 +498,7 @@
|
||||
// ==========================================
|
||||
"discordPresence": {
|
||||
"enabled": false, // Enable optional Discord Rich Presence updates. Values: true | false
|
||||
"presenceStyle": "default", // Presence card text preset: "default" (clean bilingual), "meme" (Mining and crafting), "japanese" (fully JP), or "minimal".
|
||||
"updateIntervalMs": 3000, // Minimum interval between presence payload updates.
|
||||
"debounceMs": 750 // Debounce delay used to collapse bursty presence updates.
|
||||
}, // Optional Discord Rich Presence activity card updates for current playback/study session.
|
||||
|
||||
@@ -74,7 +74,9 @@ export default {
|
||||
{ text: 'Configuration', link: '/configuration' },
|
||||
{ text: 'Keyboard Shortcuts', link: '/shortcuts' },
|
||||
{ text: 'Subtitle Annotations', link: '/subtitle-annotations' },
|
||||
{ text: 'Subtitle Sidebar', link: '/subtitle-sidebar' },
|
||||
{ text: 'Immersion Tracking', link: '/immersion-tracking' },
|
||||
{ text: 'JLPT Vocabulary Bundle', link: '/jlpt-vocab-bundle' },
|
||||
{ text: 'Troubleshooting', link: '/troubleshooting' },
|
||||
],
|
||||
},
|
||||
|
||||
@@ -10,13 +10,18 @@ let mermaidLoader: Promise<any> | null = null;
|
||||
let plausibleTrackerInitialized = false;
|
||||
const MERMAID_MODAL_ID = 'mermaid-diagram-modal';
|
||||
const PLAUSIBLE_DOMAIN = 'subminer.moe';
|
||||
const PLAUSIBLE_ENDPOINT = 'https://worker.subminer.moe/api/event';
|
||||
const PLAUSIBLE_ENABLED_HOSTNAMES = new Set(['docs.subminer.moe']);
|
||||
const PLAUSIBLE_ENDPOINT = 'https://worker.subminer.moe/api/capture';
|
||||
|
||||
async function initPlausibleTracker() {
|
||||
if (typeof window === 'undefined' || plausibleTrackerInitialized) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!PLAUSIBLE_ENABLED_HOSTNAMES.has(window.location.hostname)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const { init } = await import('@plausible-analytics/tracker');
|
||||
init({
|
||||
domain: PLAUSIBLE_DOMAIN,
|
||||
|
||||
@@ -34,6 +34,25 @@
|
||||
system-ui,
|
||||
sans-serif;
|
||||
--tui-transition: 180ms ease;
|
||||
|
||||
/* Theme-specific values — overridden in .dark below */
|
||||
--tui-nav-bg: color-mix(in srgb, var(--vp-c-bg-alt) 88%, transparent);
|
||||
--tui-table-hover-bg: color-mix(in srgb, var(--vp-c-bg-soft) 80%, transparent);
|
||||
--tui-link-underline: color-mix(in srgb, var(--vp-c-brand-1) 40%, transparent);
|
||||
--tui-selection-bg: hsla(267, 83%, 45%, 0.14);
|
||||
--tui-hero-glow: hsla(267, 83%, 45%, 0.05);
|
||||
--tui-step-hover-bg: var(--vp-c-bg-alt);
|
||||
--tui-step-hover-glow: color-mix(in srgb, var(--vp-c-brand-1) 30%, transparent);
|
||||
}
|
||||
|
||||
.dark {
|
||||
--tui-nav-bg: hsla(232, 23%, 18%, 0.82);
|
||||
--tui-table-hover-bg: hsla(232, 23%, 18%, 0.4);
|
||||
--tui-link-underline: hsla(267, 83%, 80%, 0.3);
|
||||
--tui-selection-bg: hsla(267, 83%, 80%, 0.22);
|
||||
--tui-hero-glow: hsla(267, 83%, 80%, 0.06);
|
||||
--tui-step-hover-bg: hsla(232, 23%, 18%, 0.6);
|
||||
--tui-step-hover-glow: hsla(267, 83%, 80%, 0.3);
|
||||
}
|
||||
|
||||
:root {
|
||||
@@ -48,7 +67,7 @@
|
||||
|
||||
/* === Selection === */
|
||||
::selection {
|
||||
background: hsla(267, 83%, 80%, 0.22);
|
||||
background: var(--tui-selection-bg);
|
||||
color: var(--vp-c-text-1);
|
||||
}
|
||||
|
||||
@@ -102,7 +121,7 @@ button,
|
||||
}
|
||||
|
||||
.VPNav .VPNavBar:not(.has-sidebar) {
|
||||
background: hsla(232, 23%, 18%, 0.82);
|
||||
background: var(--tui-nav-bg);
|
||||
}
|
||||
|
||||
.VPNav .VPNavBar.has-sidebar .content {
|
||||
@@ -245,13 +264,13 @@ button,
|
||||
}
|
||||
|
||||
.vp-doc table tr:hover td {
|
||||
background: hsla(232, 23%, 18%, 0.4);
|
||||
background: var(--tui-table-hover-bg);
|
||||
}
|
||||
|
||||
/* === Links === */
|
||||
.vp-doc a {
|
||||
text-decoration: none;
|
||||
border-bottom: 1px solid hsla(267, 83%, 80%, 0.3);
|
||||
border-bottom: 1px solid var(--tui-link-underline);
|
||||
transition: border-color var(--tui-transition), color var(--tui-transition);
|
||||
}
|
||||
|
||||
@@ -653,7 +672,7 @@ body {
|
||||
height: 400px;
|
||||
background: radial-gradient(
|
||||
ellipse at center,
|
||||
hsla(267, 83%, 80%, 0.06) 0%,
|
||||
var(--tui-hero-glow) 0%,
|
||||
transparent 70%
|
||||
);
|
||||
pointer-events: none;
|
||||
|
||||
@@ -39,6 +39,7 @@ src/
|
||||
types.ts # Shared type definitions
|
||||
main/ # Main-process composition/runtime adapters
|
||||
app-lifecycle.ts # App lifecycle + app-ready runtime runner factories
|
||||
character-dictionary-runtime.ts # Character-dictionary orchestration/public runtime API
|
||||
cli-runtime.ts # CLI command runtime service adapters
|
||||
config-validation.ts # Startup/hot-reload config error formatting and fail-fast helpers
|
||||
dependencies.ts # Shared dependency builders for IPC/runtime services
|
||||
@@ -53,6 +54,7 @@ src/
|
||||
startup-lifecycle.ts # Lifecycle runtime runner adapter
|
||||
state.ts # Application runtime state container + reducer transitions
|
||||
subsync-runtime.ts # Subsync command runtime adapter
|
||||
character-dictionary-runtime/ # Character-dictionary fetch/build/cache modules + focused tests
|
||||
runtime/
|
||||
composers/ # High-level composition clusters used by main.ts
|
||||
domains/ # Domain barrel exports (startup/overlay/mpv/jellyfin/...)
|
||||
|
||||
@@ -1,5 +1,19 @@
|
||||
# Changelog
|
||||
|
||||
## v0.10.0 (2026-03-29)
|
||||
- Fixed stats startup so the immersion tracker can run when `Bun.serve` is unavailable.
|
||||
- Added a Node `http` fallback for Electron/runtime paths that do not expose Bun, so stats keeps working there too.
|
||||
- Updated Discord Rich Presence to the maintained `@xhayper/discord-rpc` wrapper.
|
||||
- Fixed the macOS visible-overlay toggle path so manual hides stay hidden and the plugin uses the explicit visible-overlay toggle command.
|
||||
- Restored macOS mpv passthrough while the overlay subtitle sidebar is open so clicks outside the sidebar can refocus mpv and keep native keybindings working.
|
||||
|
||||
## v0.9.3 (2026-03-25)
|
||||
- Moved YouTube primary subtitle language defaults to `youtube.primarySubLanguages`.
|
||||
- Removed the placeholder YouTube subtitle retime step; downloaded primary subtitle tracks are now used directly.
|
||||
- Removed the old internal YouTube retime helper and its tests.
|
||||
- Clarified optional `alass` / `ffsubsync` subtitle-sync setup and fallback behavior in the docs.
|
||||
- Removed the legacy `youtubeSubgen.primarySubLanguages` config path from generated config and docs.
|
||||
|
||||
## v0.9.2 (2026-03-25)
|
||||
- Fixed overlay pointer tracking so Windows click-through toggles immediately when the cursor enters or leaves subtitle regions.
|
||||
- Fixed Windows overlay window tracking on scaled displays by converting native tracked window bounds to Electron DIP coordinates.
|
||||
|
||||
@@ -390,6 +390,8 @@ The sidebar is only available when the active subtitle source has been parsed in
|
||||
|
||||
`embedded` layout is intended to act like a split-pane view: it reserves player space with a right-side video margin and keeps interaction in both the player area and sidebar. If you see unexpected offset behavior in your environment, switch back to `overlay` to isolate sidebar placement.
|
||||
|
||||
For full details on layout modes, behavior, and the keyboard shortcut, see the [Subtitle Sidebar](/subtitle-sidebar) page.
|
||||
|
||||
`jlptColors` keys are:
|
||||
|
||||
| Key | Default | Description |
|
||||
@@ -469,6 +471,7 @@ See `config.example.jsonc` for detailed configuration options and more examples.
|
||||
| `Space` | `["cycle", "pause"]` | Toggle pause |
|
||||
| `KeyJ` | `["cycle", "sid"]` | Cycle primary subtitle track |
|
||||
| `Shift+KeyJ` | `["cycle", "secondary-sid"]` | Cycle secondary subtitle track |
|
||||
| `Ctrl+Alt+KeyP` | `["__playlist-browser-open"]` | Open playlist browser |
|
||||
| `Ctrl+Alt+KeyC` | `["__youtube-picker-open"]` | Open the manual YouTube subtitle picker |
|
||||
| `ArrowRight` | `["seek", 5]` | Seek forward 5 seconds |
|
||||
| `ArrowLeft` | `["seek", -5]` | Seek backward 5 seconds |
|
||||
@@ -505,7 +508,7 @@ See `config.example.jsonc` for detailed configuration options and more examples.
|
||||
{ "key": "Space", "command": null }
|
||||
```
|
||||
|
||||
**Special commands:** Commands prefixed with `__` are handled internally by the overlay rather than sent to mpv. `__replay-subtitle` replays the current subtitle and pauses at its end. `__play-next-subtitle` seeks to the next subtitle, plays it, and pauses at its end. `__sub-delay-next-line` shifts subtitle delay so the active line aligns to the next cue start in the active subtitle source. `__sub-delay-prev-line` shifts subtitle delay so the active line aligns to the previous cue start. `__runtime-options-open` opens the runtime options palette. `__runtime-option-cycle:<id>[:next|prev]` cycles a runtime option value.
|
||||
**Special commands:** Commands prefixed with `__` are handled internally by the overlay rather than sent to mpv. `__playlist-browser-open` opens the split-pane playlist browser for the current file's parent directory and the live mpv queue. `__replay-subtitle` replays the current subtitle and pauses at its end. `__play-next-subtitle` seeks to the next subtitle, plays it, and pauses at its end. `__sub-delay-next-line` shifts subtitle delay so the active line aligns to the next cue start in the active subtitle source. `__sub-delay-prev-line` shifts subtitle delay so the active line aligns to the previous cue start. `__runtime-options-open` opens the runtime options palette. `__runtime-option-cycle:<id>[:next|prev]` cycles a runtime option value.
|
||||
|
||||
**Supported commands:** Any valid mpv JSON IPC command array (`["cycle", "pause"]`, `["seek", 5]`, `["script-binding", "..."]`, etc.)
|
||||
|
||||
@@ -1197,30 +1200,38 @@ Discord Rich Presence is optional and disabled by default. When enabled, SubMine
|
||||
{
|
||||
"discordPresence": {
|
||||
"enabled": true,
|
||||
"presenceStyle": "default",
|
||||
"updateIntervalMs": 3000,
|
||||
"debounceMs": 750
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
| Option | Values | Description |
|
||||
| ------------------ | --------------- | ---------------------------------------------------------- |
|
||||
| `enabled` | `true`, `false` | Enable Discord Rich Presence updates (default: `false`) |
|
||||
| `updateIntervalMs` | number | Minimum interval between activity updates in milliseconds |
|
||||
| `debounceMs` | number | Debounce window for bursty playback events in milliseconds |
|
||||
| Option | Values | Description |
|
||||
| ------------------ | ------------------------------------------------- | ---------------------------------------------------------- |
|
||||
| `enabled` | `true`, `false` | Enable Discord Rich Presence updates (default: `false`) |
|
||||
| `presenceStyle` | `"default"`, `"meme"`, `"japanese"`, `"minimal"` | Card text preset (default: `"default"`) |
|
||||
| `updateIntervalMs` | number | Minimum interval between activity updates in milliseconds |
|
||||
| `debounceMs` | number | Debounce window for bursty playback events in milliseconds |
|
||||
|
||||
Setup steps:
|
||||
|
||||
1. Set `discordPresence.enabled` to `true`.
|
||||
2. Restart SubMiner.
|
||||
2. Optionally set `discordPresence.presenceStyle` to choose a card text preset.
|
||||
3. Restart SubMiner.
|
||||
|
||||
SubMiner uses a fixed official activity card style for all users:
|
||||
#### Presence style presets
|
||||
|
||||
- Details: current media title while playing (fallback: `Mining and crafting (Anki cards)` when idle/disconnected)
|
||||
- State: `Playing mm:ss / mm:ss` or `Paused mm:ss / mm:ss` (fallback: `Idle`)
|
||||
- Large image key/text: `subminer-logo` / `SubMiner`
|
||||
- Small image key/text: `study` / `Sentence Mining`
|
||||
- No activity button by default
|
||||
While playing media, the **Details** line always shows the current media title and **State** shows `Playing mm:ss / mm:ss` or `Paused mm:ss / mm:ss`. The preset controls what appears when idle and the tooltip text on images.
|
||||
|
||||
| Preset | Idle details | Small image text | Vibe |
|
||||
| ------------ | ----------------------------------- | ------------------ | --------------------------------------- |
|
||||
| **`default`**| `Sentence Mining` | `日本語学習中` | Clean, bilingual flair |
|
||||
| `meme` | `Mining and crafting (Anki cards)` | `Sentence Mining` | Minecraft-inspired joke |
|
||||
| `japanese` | `文の採掘中` | `イマージョン学習` | Fully Japanese |
|
||||
| `minimal` | `SubMiner` | *(none)* | Bare essentials, no small image overlay |
|
||||
|
||||
All presets use the `subminer-logo` large image with `SubMiner` tooltip. No activity button is shown by default.
|
||||
|
||||
Troubleshooting:
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ features:
|
||||
alt: Subtitle download icon
|
||||
title: Subtitle Download & Sync
|
||||
details: Search and pull subtitles from Jimaku, then auto-sync timing with alass or ffsubsync — all from the overlay.
|
||||
link: /configuration#jimaku
|
||||
link: /jimaku-integration
|
||||
linkText: Jimaku integration
|
||||
- icon:
|
||||
src: /assets/tokenization.svg
|
||||
@@ -223,12 +223,12 @@ const demoAssetVersion = '20260223-2';
|
||||
}
|
||||
|
||||
.workflow-step:hover {
|
||||
background: hsla(232, 23%, 18%, 0.6);
|
||||
background: var(--tui-step-hover-bg);
|
||||
}
|
||||
|
||||
.workflow-step:hover .step-number {
|
||||
color: var(--vp-c-brand-1);
|
||||
text-shadow: 0 0 12px hsla(267, 83%, 80%, 0.3);
|
||||
text-shadow: 0 0 12px var(--tui-step-hover-glow);
|
||||
}
|
||||
|
||||
.workflow-connector {
|
||||
|
||||
@@ -172,7 +172,7 @@ Install `mpv` separately and ensure `mpv.exe` is on `PATH`. `ffmpeg` is still re
|
||||
### Windows Usage Notes
|
||||
|
||||
- Launch `SubMiner.exe` once to let the first-run setup flow seed `%APPDATA%\\SubMiner\\config.jsonc`, offer mpv plugin installation, open bundled Yomitan settings, and optionally create `SubMiner mpv` Start Menu/Desktop shortcuts.
|
||||
- If you use the mpv plugin, leave `binary_path` empty unless SubMiner is installed in a non-standard location.
|
||||
- First-run mpv plugin installs pin `binary_path` to the current `SubMiner.exe` automatically. Manual plugin configs can leave `binary_path` empty unless SubMiner is installed in a non-standard location.
|
||||
- Windows plugin installs rewrite `socket_path` to `\\.\pipe\subminer-socket`; do not keep `/tmp/subminer-socket` on Windows.
|
||||
- Native window tracking is built in on Windows; no `xdotool`, `xwininfo`, or compositor-specific helper is required.
|
||||
|
||||
@@ -201,6 +201,7 @@ mpv must be launched with `--input-ipc-server=/tmp/subminer-socket` for SubMiner
|
||||
:::
|
||||
|
||||
On Windows, the packaged plugin config is rewritten to `socket_path=\\.\pipe\subminer-socket`.
|
||||
First-run setup also pins `binary_path` to the current app binary so mpv launches the same SubMiner build that installed the plugin.
|
||||
|
||||
```bash
|
||||
# Option 1: install from release assets bundle
|
||||
|
||||
@@ -131,6 +131,6 @@ Verify mpv is running and connected via IPC. SubMiner loads the subtitle by issu
|
||||
|
||||
## Related
|
||||
|
||||
- [Configuration Reference](/configuration#jimaku) — full config section
|
||||
- [Configuration Reference](/configuration#jimaku) — full config options
|
||||
- [Mining Workflow](/mining-workflow#jimaku-subtitle-search) — how Jimaku fits into the sentence mining loop
|
||||
- [Troubleshooting](/troubleshooting#jimaku) — additional error guidance
|
||||
|
||||
@@ -6,14 +6,17 @@ const docsThemePath = new URL('./.vitepress/theme/index.ts', import.meta.url);
|
||||
const docsConfigContents = readFileSync(docsConfigPath, 'utf8');
|
||||
const docsThemeContents = readFileSync(docsThemePath, 'utf8');
|
||||
|
||||
test('docs site keeps docs hostname while sending plausible events to subminer.moe via worker.subminer.moe', () => {
|
||||
test('docs site keeps docs hostname while sending plausible events to subminer.moe via worker.subminer.moe capture endpoint', () => {
|
||||
expect(docsConfigContents).toContain("hostname: 'https://docs.subminer.moe'");
|
||||
expect(docsThemeContents).toContain("const PLAUSIBLE_DOMAIN = 'subminer.moe'");
|
||||
expect(docsThemeContents).toContain('const PLAUSIBLE_ENABLED_HOSTNAMES = new Set([');
|
||||
expect(docsThemeContents).toContain("'docs.subminer.moe'");
|
||||
expect(docsThemeContents).toContain(
|
||||
"const PLAUSIBLE_ENDPOINT = 'https://worker.subminer.moe/api/event'",
|
||||
"const PLAUSIBLE_ENDPOINT = 'https://worker.subminer.moe/api/capture'",
|
||||
);
|
||||
expect(docsThemeContents).toContain('@plausible-analytics/tracker');
|
||||
expect(docsThemeContents).toContain('const { init } = await import');
|
||||
expect(docsThemeContents).toContain('!PLAUSIBLE_ENABLED_HOSTNAMES.has(window.location.hostname)');
|
||||
expect(docsThemeContents).toContain('domain: PLAUSIBLE_DOMAIN');
|
||||
expect(docsThemeContents).toContain('endpoint: PLAUSIBLE_ENDPOINT');
|
||||
expect(docsThemeContents).toContain('outboundLinks: true');
|
||||
|
||||
@@ -498,6 +498,7 @@
|
||||
// ==========================================
|
||||
"discordPresence": {
|
||||
"enabled": false, // Enable optional Discord Rich Presence updates. Values: true | false
|
||||
"presenceStyle": "default", // Presence card text preset: "default" (clean bilingual), "meme" (Mining and crafting), "japanese" (fully JP), or "minimal".
|
||||
"updateIntervalMs": 3000, // Minimum interval between presence payload updates.
|
||||
"debounceMs": 750 // Debounce delay used to collapse bursty presence updates.
|
||||
}, // Optional Discord Rich Presence activity card updates for current playback/study session.
|
||||
|
||||
@@ -40,6 +40,7 @@ These control playback and subtitle display. They require overlay window focus.
|
||||
| `Space` | Toggle mpv pause |
|
||||
| `J` | Cycle primary subtitle track |
|
||||
| `Shift+J` | Cycle secondary subtitle track |
|
||||
| `Ctrl+Alt+P` | Open playlist browser for current directory + queue |
|
||||
| `ArrowRight` | Seek forward 5 seconds |
|
||||
| `ArrowLeft` | Seek backward 5 seconds |
|
||||
| `ArrowUp` | Seek forward 60 seconds |
|
||||
@@ -56,7 +57,7 @@ These control playback and subtitle display. They require overlay window focus.
|
||||
| `Right-click + drag` | Reposition subtitles (on subtitle area) |
|
||||
| `Ctrl/Cmd+A` | Append clipboard video path to mpv playlist |
|
||||
|
||||
These keybindings can be overridden or disabled via the `keybindings` config array.
|
||||
These keybindings can be overridden or disabled via the `keybindings` config array. The playlist browser opens a split overlay modal with sibling video files on the left and the live mpv playlist on the right.
|
||||
|
||||
Mouse-hover playback behavior is configured separately from shortcuts: `subtitleStyle.autoPauseVideoOnHover` defaults to `true` (pause on subtitle hover, resume on leave).
|
||||
|
||||
|
||||
71
docs-site/subtitle-sidebar.md
Normal file
71
docs-site/subtitle-sidebar.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# Subtitle Sidebar
|
||||
|
||||
The subtitle sidebar displays the full parsed cue list for the active subtitle file as a scrollable panel alongside mpv. It lets you review past and upcoming lines, click any cue to seek directly to that moment, and follow along without depending on the transient overlay subtitles.
|
||||
|
||||
The sidebar is opt-in and disabled by default. Enable it under `subtitleSidebar.enabled` in your config.
|
||||
|
||||
## How It Works
|
||||
|
||||
When SubMiner parses the active subtitle source into a cue list, the sidebar becomes available. Toggle it with the `\` key (configurable via `subtitleSidebar.toggleKey`). While open:
|
||||
|
||||
- The active cue is highlighted and kept in view as playback advances (when `autoScroll` is `true`).
|
||||
- Clicking any cue seeks mpv to that timestamp.
|
||||
- The sidebar stays synchronized with the overlay — media transitions and subtitle source changes update both simultaneously.
|
||||
|
||||
The sidebar only appears when a parsed cue list is available. External subtitle sources that SubMiner cannot parse (for example, embedded ASS tracks rendered directly by mpv) will not populate the sidebar.
|
||||
|
||||
## Layout Modes
|
||||
|
||||
Two layout modes are available via `subtitleSidebar.layout`:
|
||||
|
||||
**`overlay`** (default) — The sidebar floats over mpv as a panel. It does not affect the player window size or position.
|
||||
|
||||
**`embedded`** — Reserves space on the right side of the player and shifts the video area to mimic a split-pane layout. Useful if you want the cue list visible without it covering the video. If you see unexpected positioning in your environment, switch back to `overlay` to isolate the issue.
|
||||
|
||||
## Configuration
|
||||
|
||||
Enable and configure the sidebar under `subtitleSidebar` in your config file:
|
||||
|
||||
```json
|
||||
{
|
||||
"subtitleSidebar": {
|
||||
"enabled": false,
|
||||
"autoOpen": false,
|
||||
"layout": "overlay",
|
||||
"toggleKey": "Backslash",
|
||||
"pauseVideoOnHover": false,
|
||||
"autoScroll": true,
|
||||
"fontFamily": "\"M PLUS 1\", \"Noto Sans CJK JP\", sans-serif",
|
||||
"fontSize": 16
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
| Option | Type | Default | Description |
|
||||
| --------------------------- | ------- | ------------ | -------------------------------------------------------------------------------------------------- |
|
||||
| `enabled` | boolean | `false` | Enable subtitle sidebar support |
|
||||
| `autoOpen` | boolean | `false` | Open the sidebar automatically on overlay startup |
|
||||
| `layout` | string | `"overlay"` | `"overlay"` floats over mpv; `"embedded"` reserves right-side player space |
|
||||
| `toggleKey` | string | `"Backslash"` | `KeyboardEvent.code` for the toggle shortcut |
|
||||
| `pauseVideoOnHover` | boolean | `false` | Pause playback while hovering the cue list |
|
||||
| `autoScroll` | boolean | `true` | Keep the active cue in view during playback |
|
||||
| `maxWidth` | number | `420` | Maximum sidebar width in CSS pixels |
|
||||
| `opacity` | number | `0.95` | Sidebar opacity between `0` and `1` |
|
||||
| `backgroundColor` | string | — | Sidebar shell background color |
|
||||
| `textColor` | string | — | Default cue text color |
|
||||
| `fontFamily` | string | — | CSS `font-family` applied to cue text |
|
||||
| `fontSize` | number | `16` | Base cue font size in CSS pixels |
|
||||
| `timestampColor` | string | — | Cue timestamp color |
|
||||
| `activeLineColor` | string | — | Active cue text color |
|
||||
| `activeLineBackgroundColor` | string | — | Active cue background color |
|
||||
| `hoverLineBackgroundColor` | string | — | Hovered cue background color |
|
||||
|
||||
Default colors use Catppuccin Macchiato with a semi-transparent shell so the panel stays readable without feeling like a solid overlay.
|
||||
|
||||
## Keyboard Shortcut
|
||||
|
||||
| Key | Action | Config key |
|
||||
| --- | ----------------------- | ------------------------------ |
|
||||
| `\` | Toggle subtitle sidebar | `subtitleSidebar.toggleKey` |
|
||||
|
||||
The toggle is overlay-local and only opens when SubMiner has a parsed cue list for the active subtitle source. See [Keyboard Shortcuts](/shortcuts) for the full shortcut reference.
|
||||
@@ -295,6 +295,8 @@ See [Keyboard Shortcuts](/shortcuts) for the full reference, including mining sh
|
||||
`Alt+Shift+Y` is fixed and not configurable. All other shortcuts can be changed under `shortcuts` in your config.
|
||||
:::
|
||||
|
||||
Useful overlay-local default keybinding: `Ctrl+Alt+P` opens the playlist browser for the current video's parent directory and the live mpv queue so you can append, reorder, remove, or jump between episodes without leaving playback.
|
||||
|
||||
Hovering over subtitle text pauses mpv by default; leaving resumes it. Disable with `subtitleStyle.autoPauseVideoOnHover: false`. To also pause while the Yomitan popup is open, set `subtitleStyle.autoPauseVideoOnYomitanPopup: true`.
|
||||
|
||||
### Drag-and-Drop
|
||||
|
||||
@@ -21,6 +21,7 @@ Read when: you need internal architecture, workflow, verification, or release gu
|
||||
|
||||
- New feature or refactor: [Workflow](./workflow/README.md), then [Architecture](./architecture/README.md)
|
||||
- Test/build/release work: [Verification](./workflow/verification.md), then [Release Guide](./RELEASING.md)
|
||||
- Coverage lane selection or LCOV artifact path: [Verification](./workflow/verification.md)
|
||||
- “What owns this behavior?”: [Domains](./architecture/domains.md)
|
||||
- “Can these modules depend on each other?”: [Layering](./architecture/layering.md)
|
||||
- “What doc should exist for this?”: [Catalog](./knowledge-base/catalog.md)
|
||||
|
||||
@@ -34,4 +34,5 @@ Notes:
|
||||
- Do not tag while `changes/*.md` fragments still exist.
|
||||
- If you need to repair a published release body (for example, a prior version’s section was omitted), regenerate notes from `CHANGELOG.md` and re-edit the release with `gh release edit --notes-file`.
|
||||
- Tagged release workflow now also attempts to update `subminer-bin` on the AUR after GitHub Release publication.
|
||||
- AUR publish is best-effort: the workflow retries transient SSH clone/push failures, then warns and leaves the GitHub Release green if AUR still fails. Follow up with a manual `git push aur master` from the AUR checkout when needed.
|
||||
- Required GitHub Actions secret: `AUR_SSH_PRIVATE_KEY`. Add the matching public key to your AUR account before relying on the automation.
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# Architecture Map
|
||||
|
||||
Status: active
|
||||
Last verified: 2026-03-13
|
||||
Last verified: 2026-03-26
|
||||
Owner: Kyle Yasuda
|
||||
Read when: runtime ownership, composition boundaries, or layering questions
|
||||
|
||||
@@ -24,9 +24,11 @@ The desktop app keeps `src/main.ts` as composition root and pushes behavior into
|
||||
## Current Shape
|
||||
|
||||
- `src/main/` owns composition, runtime setup, IPC wiring, and app lifecycle adapters.
|
||||
- `src/main/boot/` owns boot-phase assembly seams so `src/main.ts` can stay focused on lifecycle coordination and startup-path selection.
|
||||
- `src/core/services/` owns focused runtime services plus pure or side-effect-bounded logic.
|
||||
- `src/renderer/` owns overlay rendering and input behavior.
|
||||
- `src/config/` owns config definitions, defaults, loading, and resolution.
|
||||
- `src/types/` owns shared cross-runtime contracts via domain entrypoints; `src/types.ts` stays a compatibility barrel.
|
||||
- `src/main/runtime/composers/` owns larger domain compositions.
|
||||
|
||||
## Architecture Intent
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# Domain Ownership
|
||||
|
||||
Status: active
|
||||
Last verified: 2026-03-13
|
||||
Last verified: 2026-03-26
|
||||
Owner: Kyle Yasuda
|
||||
Read when: you need to find the owner module for a behavior or test surface
|
||||
|
||||
@@ -23,17 +23,28 @@ Read when: you need to find the owner module for a behavior or test surface
|
||||
- Anki workflow: `src/anki-integration/`, `src/core/services/anki-jimaku*.ts`
|
||||
- Immersion tracking: `src/core/services/immersion-tracker/`
|
||||
Includes stats storage/query schema such as `imm_videos`, `imm_media_art`, and `imm_youtube_videos` for per-video and YouTube-specific library metadata.
|
||||
- AniList tracking: `src/core/services/anilist/`, `src/main/runtime/composers/anilist-*`
|
||||
- AniList tracking + character dictionary: `src/core/services/anilist/`, `src/main/runtime/composers/anilist-*`, `src/main/character-dictionary-runtime.ts`, `src/main/character-dictionary-runtime/`
|
||||
- Jellyfin integration: `src/core/services/jellyfin*.ts`, `src/main/runtime/composers/jellyfin-*`
|
||||
- Window trackers: `src/window-trackers/`
|
||||
- Stats app: `stats/`
|
||||
- Public docs site: `docs-site/`
|
||||
|
||||
## Shared Contract Entry Points
|
||||
|
||||
- Config + app-state contracts: `src/types/config.ts`
|
||||
- Subtitle/token/media annotation contracts: `src/types/subtitle.ts`
|
||||
- Runtime/window/controller/Electron bridge contracts: `src/types/runtime.ts`
|
||||
- Anki-specific contracts: `src/types/anki.ts`
|
||||
- External integration contracts: `src/types/integrations.ts`
|
||||
- Runtime-option contracts: `src/types/runtime-options.ts`
|
||||
- Compatibility-only barrel: `src/types.ts`
|
||||
|
||||
## Ownership Heuristics
|
||||
|
||||
- Runtime wiring or dependency setup: start in `src/main/`
|
||||
- Business logic or service behavior: start in `src/core/services/`
|
||||
- UI interaction or overlay DOM behavior: start in `src/renderer/`
|
||||
- Command parsing or mpv launch flow: start in `launcher/`
|
||||
- Shared contract changes: add or edit the narrowest `src/types/<domain>.ts` entrypoint; only touch `src/types.ts` for compatibility exports.
|
||||
- User-facing docs: `docs-site/`
|
||||
- Internal process/docs: `docs/`
|
||||
|
||||
@@ -13,6 +13,7 @@ This section is the internal workflow map for contributors and agents.
|
||||
|
||||
- [Planning](./planning.md) - when to write a lightweight plan vs a full execution plan
|
||||
- [Verification](./verification.md) - maintained test/build lanes and handoff gate
|
||||
- [Agent Plugins](./agent-plugins.md) - repo-local plugin ownership for agent workflow skills
|
||||
- [Release Guide](../RELEASING.md) - tagged release workflow
|
||||
|
||||
## Default Flow
|
||||
|
||||
32
docs/workflow/agent-plugins.md
Normal file
32
docs/workflow/agent-plugins.md
Normal file
@@ -0,0 +1,32 @@
|
||||
<!-- read_when: using or modifying repo-local agent plugins -->
|
||||
|
||||
# Agent Plugins
|
||||
|
||||
Status: active
|
||||
Last verified: 2026-03-26
|
||||
Owner: Kyle Yasuda
|
||||
Read when: packaging or migrating repo-local agent workflow skills into plugins
|
||||
|
||||
## SubMiner Workflow Plugin
|
||||
|
||||
- Canonical plugin path: `plugins/subminer-workflow/`
|
||||
- Marketplace catalog: `.agents/plugins/marketplace.json`
|
||||
- Canonical skill sources:
|
||||
- `plugins/subminer-workflow/skills/subminer-scrum-master/`
|
||||
- `plugins/subminer-workflow/skills/subminer-change-verification/`
|
||||
|
||||
## Migration Rule
|
||||
|
||||
- Plugin-owned skills are the source of truth.
|
||||
- `.agents/skills/subminer-*` remain only as compatibility shims.
|
||||
- Existing script entrypoints under `.agents/skills/subminer-change-verification/scripts/` stay as wrappers so historical commands do not break.
|
||||
|
||||
## Backlog
|
||||
|
||||
- Prefer Backlog.md MCP when the host session exposes it.
|
||||
- If MCP is unavailable, use repo-local `backlog/` files and record that fallback.
|
||||
|
||||
## Verification
|
||||
|
||||
- For plugin/docs-only changes, start with `bun run test:docs:kb`.
|
||||
- Use the plugin-owned verifier when the change crosses from docs into scripts or workflow logic.
|
||||
@@ -31,8 +31,15 @@ bun run docs:build
|
||||
- Config/schema/defaults: `bun run test:config`, then `bun run generate:config-example` if template/defaults changed
|
||||
- Launcher/plugin: `bun run test:launcher` or `bun run test:env`
|
||||
- Runtime-compat / compiled behavior: `bun run test:runtime:compat`
|
||||
- Coverage for the maintained source lane: `bun run test:coverage:src`
|
||||
- Deep/local full gate: default handoff gate above
|
||||
|
||||
## Coverage Reporting
|
||||
|
||||
- `bun run test:coverage:src` runs the maintained `test:src` lane through a sharded coverage runner: one Bun coverage process per test file, then merged LCOV output.
|
||||
- Machine-readable output lands at `coverage/test-src/lcov.info`.
|
||||
- CI and release quality-gate runs upload that LCOV file as the `coverage-test-src` artifact.
|
||||
|
||||
## Rules
|
||||
|
||||
- Capture exact failing command and error when verification breaks.
|
||||
|
||||
@@ -227,11 +227,7 @@ test('stats background command launches attached daemon control command with res
|
||||
|
||||
assert.equal(handled, true);
|
||||
assert.deepEqual(harness.forwarded, [
|
||||
[
|
||||
'--stats-daemon-start',
|
||||
'--stats-response-path',
|
||||
'/tmp/subminer-stats-test/response.json',
|
||||
],
|
||||
['--stats-daemon-start', '--stats-response-path', '/tmp/subminer-stats-test/response.json'],
|
||||
]);
|
||||
assert.equal(harness.removedPaths.length, 1);
|
||||
});
|
||||
@@ -257,11 +253,7 @@ test('stats command waits for attached app exit after startup response', async (
|
||||
const final = await statsCommand;
|
||||
assert.equal(final, true);
|
||||
assert.deepEqual(harness.forwarded, [
|
||||
[
|
||||
'--stats',
|
||||
'--stats-response-path',
|
||||
'/tmp/subminer-stats-test/response.json',
|
||||
],
|
||||
['--stats', '--stats-response-path', '/tmp/subminer-stats-test/response.json'],
|
||||
]);
|
||||
assert.equal(harness.removedPaths.length, 1);
|
||||
});
|
||||
@@ -317,11 +309,7 @@ test('stats stop command forwards stop flag to the app', async () => {
|
||||
|
||||
assert.equal(handled, true);
|
||||
assert.deepEqual(harness.forwarded, [
|
||||
[
|
||||
'--stats-daemon-stop',
|
||||
'--stats-response-path',
|
||||
'/tmp/subminer-stats-test/response.json',
|
||||
],
|
||||
['--stats-daemon-stop', '--stats-response-path', '/tmp/subminer-stats-test/response.json'],
|
||||
]);
|
||||
assert.equal(harness.removedPaths.length, 1);
|
||||
});
|
||||
|
||||
@@ -14,6 +14,7 @@ import {
|
||||
waitForUnixSocketReady,
|
||||
} from '../mpv.js';
|
||||
import type { Args } from '../types.js';
|
||||
import { nowMs } from '../time.js';
|
||||
import type { LauncherCommandContext } from './context.js';
|
||||
import { ensureLauncherSetupReady } from '../setup-gate.js';
|
||||
import {
|
||||
@@ -116,7 +117,7 @@ async function ensurePlaybackSetupReady(context: LauncherCommandContext): Promis
|
||||
child.unref();
|
||||
},
|
||||
sleep: (ms) => new Promise((resolve) => setTimeout(resolve, ms)),
|
||||
now: () => Date.now(),
|
||||
now: () => nowMs(),
|
||||
timeoutMs: SETUP_WAIT_TIMEOUT_MS,
|
||||
pollIntervalMs: SETUP_POLL_INTERVAL_MS,
|
||||
});
|
||||
@@ -209,7 +210,11 @@ export async function runPlaybackCommandWithDeps(
|
||||
pluginRuntimeConfig.autoStartPauseUntilReady;
|
||||
|
||||
if (shouldPauseUntilOverlayReady) {
|
||||
deps.log('info', args.logLevel, 'Configured to pause mpv until overlay and tokenization are ready');
|
||||
deps.log(
|
||||
'info',
|
||||
args.logLevel,
|
||||
'Configured to pause mpv until overlay and tokenization are ready',
|
||||
);
|
||||
}
|
||||
|
||||
await deps.startMpv(
|
||||
@@ -250,7 +255,11 @@ export async function runPlaybackCommandWithDeps(
|
||||
if (ready) {
|
||||
deps.log('info', args.logLevel, 'MPV IPC socket ready, relying on mpv plugin auto-start');
|
||||
} else {
|
||||
deps.log('info', args.logLevel, 'MPV IPC socket not ready yet, relying on mpv plugin auto-start');
|
||||
deps.log(
|
||||
'info',
|
||||
args.logLevel,
|
||||
'MPV IPC socket not ready yet, relying on mpv plugin auto-start',
|
||||
);
|
||||
}
|
||||
} else if (ready) {
|
||||
deps.log(
|
||||
|
||||
@@ -2,6 +2,7 @@ import fs from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import { runAppCommandAttached } from '../mpv.js';
|
||||
import { nowMs } from '../time.js';
|
||||
import { sleep } from '../util.js';
|
||||
import type { LauncherCommandContext } from './context.js';
|
||||
|
||||
@@ -45,8 +46,8 @@ const defaultDeps: StatsCommandDeps = {
|
||||
runAppCommandAttached: (appPath, appArgs, logLevel, label) =>
|
||||
runAppCommandAttached(appPath, appArgs, logLevel, label),
|
||||
waitForStatsResponse: async (responsePath, signal) => {
|
||||
const deadline = Date.now() + STATS_STARTUP_RESPONSE_TIMEOUT_MS;
|
||||
while (Date.now() < deadline) {
|
||||
const deadline = nowMs() + STATS_STARTUP_RESPONSE_TIMEOUT_MS;
|
||||
while (nowMs() < deadline) {
|
||||
if (signal?.aborted) {
|
||||
return {
|
||||
ok: false,
|
||||
|
||||
155
launcher/config/args-normalizer.test.ts
Normal file
155
launcher/config/args-normalizer.test.ts
Normal file
@@ -0,0 +1,155 @@
|
||||
import assert from 'node:assert/strict';
|
||||
import fs from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import test from 'node:test';
|
||||
import {
|
||||
applyInvocationsToArgs,
|
||||
applyRootOptionsToArgs,
|
||||
createDefaultArgs,
|
||||
} from './args-normalizer.js';
|
||||
|
||||
class ExitSignal extends Error {
|
||||
code: number;
|
||||
|
||||
constructor(code: number) {
|
||||
super(`exit:${code}`);
|
||||
this.code = code;
|
||||
}
|
||||
}
|
||||
|
||||
function withProcessExitIntercept(callback: () => void): ExitSignal {
|
||||
const originalExit = process.exit;
|
||||
try {
|
||||
process.exit = ((code?: number) => {
|
||||
throw new ExitSignal(code ?? 0);
|
||||
}) as typeof process.exit;
|
||||
callback();
|
||||
} catch (error) {
|
||||
if (error instanceof ExitSignal) {
|
||||
return error;
|
||||
}
|
||||
throw error;
|
||||
} finally {
|
||||
process.exit = originalExit;
|
||||
}
|
||||
|
||||
throw new Error('expected process.exit');
|
||||
}
|
||||
|
||||
function withTempDir<T>(fn: (dir: string) => T): T {
|
||||
const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'subminer-launcher-args-'));
|
||||
try {
|
||||
return fn(dir);
|
||||
} finally {
|
||||
fs.rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
test('createDefaultArgs normalizes configured language codes and env thread override', () => {
|
||||
const originalThreads = process.env.SUBMINER_WHISPER_THREADS;
|
||||
process.env.SUBMINER_WHISPER_THREADS = '7';
|
||||
|
||||
try {
|
||||
const parsed = createDefaultArgs({
|
||||
primarySubLanguages: [' JA ', 'jpn', 'ja'],
|
||||
secondarySubLanguages: ['en', 'ENG', ''],
|
||||
whisperThreads: 2,
|
||||
});
|
||||
|
||||
assert.deepEqual(parsed.youtubePrimarySubLangs, ['ja', 'jpn']);
|
||||
assert.deepEqual(parsed.youtubeSecondarySubLangs, ['en', 'eng']);
|
||||
assert.deepEqual(parsed.youtubeAudioLangs, ['ja', 'jpn', 'en', 'eng']);
|
||||
assert.equal(parsed.whisperThreads, 7);
|
||||
assert.equal(parsed.youtubeWhisperSourceLanguage, 'ja');
|
||||
} finally {
|
||||
if (originalThreads === undefined) {
|
||||
delete process.env.SUBMINER_WHISPER_THREADS;
|
||||
} else {
|
||||
process.env.SUBMINER_WHISPER_THREADS = originalThreads;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
test('applyRootOptionsToArgs maps file, directory, and url targets', () => {
|
||||
withTempDir((dir) => {
|
||||
const filePath = path.join(dir, 'movie.mkv');
|
||||
const folderPath = path.join(dir, 'anime');
|
||||
fs.writeFileSync(filePath, 'x');
|
||||
fs.mkdirSync(folderPath);
|
||||
|
||||
const fileParsed = createDefaultArgs({});
|
||||
applyRootOptionsToArgs(fileParsed, {}, filePath);
|
||||
assert.equal(fileParsed.targetKind, 'file');
|
||||
assert.equal(fileParsed.target, filePath);
|
||||
|
||||
const dirParsed = createDefaultArgs({});
|
||||
applyRootOptionsToArgs(dirParsed, {}, folderPath);
|
||||
assert.equal(dirParsed.directory, folderPath);
|
||||
assert.equal(dirParsed.target, '');
|
||||
assert.equal(dirParsed.targetKind, '');
|
||||
|
||||
const urlParsed = createDefaultArgs({});
|
||||
applyRootOptionsToArgs(urlParsed, {}, 'https://example.test/video');
|
||||
assert.equal(urlParsed.targetKind, 'url');
|
||||
assert.equal(urlParsed.target, 'https://example.test/video');
|
||||
});
|
||||
});
|
||||
|
||||
test('applyRootOptionsToArgs rejects unsupported targets', () => {
|
||||
const parsed = createDefaultArgs({});
|
||||
|
||||
const error = withProcessExitIntercept(() => {
|
||||
applyRootOptionsToArgs(parsed, {}, '/definitely/missing/subminer-target');
|
||||
});
|
||||
|
||||
assert.equal(error.code, 1);
|
||||
assert.match(error.message, /exit:1/);
|
||||
});
|
||||
|
||||
test('applyInvocationsToArgs maps config and jellyfin invocation state', () => {
|
||||
const parsed = createDefaultArgs({});
|
||||
|
||||
applyInvocationsToArgs(parsed, {
|
||||
jellyfinInvocation: {
|
||||
action: 'play',
|
||||
play: true,
|
||||
server: 'https://jf.example',
|
||||
username: 'alice',
|
||||
password: 'secret',
|
||||
logLevel: 'debug',
|
||||
},
|
||||
configInvocation: {
|
||||
action: 'show',
|
||||
logLevel: 'warn',
|
||||
},
|
||||
mpvInvocation: null,
|
||||
appInvocation: null,
|
||||
dictionaryTriggered: false,
|
||||
dictionaryTarget: null,
|
||||
dictionaryLogLevel: null,
|
||||
statsTriggered: false,
|
||||
statsBackground: false,
|
||||
statsStop: false,
|
||||
statsCleanup: false,
|
||||
statsCleanupVocab: false,
|
||||
statsCleanupLifetime: false,
|
||||
statsLogLevel: null,
|
||||
doctorTriggered: false,
|
||||
doctorLogLevel: null,
|
||||
doctorRefreshKnownWords: false,
|
||||
texthookerTriggered: false,
|
||||
texthookerLogLevel: null,
|
||||
});
|
||||
|
||||
assert.equal(parsed.jellyfin, false);
|
||||
assert.equal(parsed.jellyfinPlay, true);
|
||||
assert.equal(parsed.jellyfinDiscovery, false);
|
||||
assert.equal(parsed.jellyfinLogin, false);
|
||||
assert.equal(parsed.jellyfinLogout, false);
|
||||
assert.equal(parsed.jellyfinServer, 'https://jf.example');
|
||||
assert.equal(parsed.jellyfinUsername, 'alice');
|
||||
assert.equal(parsed.jellyfinPassword, 'secret');
|
||||
assert.equal(parsed.configShow, true);
|
||||
assert.equal(parsed.logLevel, 'warn');
|
||||
});
|
||||
37
launcher/config/cli-parser-builder.test.ts
Normal file
37
launcher/config/cli-parser-builder.test.ts
Normal file
@@ -0,0 +1,37 @@
|
||||
import assert from 'node:assert/strict';
|
||||
import test from 'node:test';
|
||||
import { parseCliPrograms, resolveTopLevelCommand } from './cli-parser-builder.js';
|
||||
|
||||
test('resolveTopLevelCommand skips root options and finds the first command', () => {
|
||||
assert.deepEqual(resolveTopLevelCommand(['--backend', 'macos', 'config', 'show']), {
|
||||
name: 'config',
|
||||
index: 2,
|
||||
});
|
||||
});
|
||||
|
||||
test('resolveTopLevelCommand respects the app alias after root options', () => {
|
||||
assert.deepEqual(resolveTopLevelCommand(['--log-level', 'debug', 'bin', '--foo']), {
|
||||
name: 'bin',
|
||||
index: 2,
|
||||
});
|
||||
});
|
||||
|
||||
test('parseCliPrograms keeps root options and target when no command is present', () => {
|
||||
const result = parseCliPrograms(['--backend', 'x11', '/tmp/movie.mkv'], 'subminer');
|
||||
|
||||
assert.equal(result.options.backend, 'x11');
|
||||
assert.equal(result.rootTarget, '/tmp/movie.mkv');
|
||||
assert.equal(result.invocations.appInvocation, null);
|
||||
});
|
||||
|
||||
test('parseCliPrograms routes app alias arguments through passthrough mode', () => {
|
||||
const result = parseCliPrograms(
|
||||
['--backend', 'macos', 'bin', '--anilist', '--log-level', 'debug'],
|
||||
'subminer',
|
||||
);
|
||||
|
||||
assert.equal(result.options.backend, 'macos');
|
||||
assert.deepEqual(result.invocations.appInvocation, {
|
||||
appArgs: ['--anilist', '--log-level', 'debug'],
|
||||
});
|
||||
});
|
||||
@@ -236,17 +236,12 @@ export function parseCliPrograms(
|
||||
normalizedAction !== 'rebuild' &&
|
||||
normalizedAction !== 'backfill'
|
||||
) {
|
||||
throw new Error(
|
||||
'Invalid stats action. Valid values are cleanup, rebuild, or backfill.',
|
||||
);
|
||||
throw new Error('Invalid stats action. Valid values are cleanup, rebuild, or backfill.');
|
||||
}
|
||||
if (normalizedAction && (statsBackground || statsStop)) {
|
||||
throw new Error('Stats background and stop flags cannot be combined with stats actions.');
|
||||
}
|
||||
if (
|
||||
normalizedAction !== 'cleanup' &&
|
||||
(options.vocab === true || options.lifetime === true)
|
||||
) {
|
||||
if (normalizedAction !== 'cleanup' && (options.vocab === true || options.lifetime === true)) {
|
||||
throw new Error('Stats --vocab and --lifetime flags require the cleanup action.');
|
||||
}
|
||||
if (normalizedAction === 'cleanup') {
|
||||
|
||||
@@ -10,6 +10,7 @@ import type {
|
||||
JellyfinGroupEntry,
|
||||
} from './types.js';
|
||||
import { log, fail, getMpvLogPath } from './log.js';
|
||||
import { nowMs } from './time.js';
|
||||
import { commandExists, resolvePathMaybe, sleep } from './util.js';
|
||||
import {
|
||||
pickLibrary,
|
||||
@@ -453,9 +454,9 @@ async function runAppJellyfinCommand(
|
||||
}
|
||||
return retriedAfterStart ? 12000 : 4000;
|
||||
})();
|
||||
const settleDeadline = Date.now() + settleWindowMs;
|
||||
const settleDeadline = nowMs() + settleWindowMs;
|
||||
const settleOffset = attempt.logOffset;
|
||||
while (Date.now() < settleDeadline) {
|
||||
while (nowMs() < settleDeadline) {
|
||||
await sleep(100);
|
||||
const settledOutput = readLogAppendedSince(settleOffset);
|
||||
if (!settledOutput.trim()) {
|
||||
@@ -489,8 +490,8 @@ async function requestJellyfinPreviewAuthFromApp(
|
||||
return null;
|
||||
}
|
||||
|
||||
const deadline = Date.now() + 4000;
|
||||
while (Date.now() < deadline) {
|
||||
const deadline = nowMs() + 4000;
|
||||
while (nowMs() < deadline) {
|
||||
try {
|
||||
if (fs.existsSync(responsePath)) {
|
||||
const raw = fs.readFileSync(responsePath, 'utf8');
|
||||
|
||||
@@ -14,12 +14,7 @@ test('getDefaultMpvLogFile uses APPDATA on windows', () => {
|
||||
assert.equal(
|
||||
path.normalize(resolved),
|
||||
path.normalize(
|
||||
path.join(
|
||||
'C:\\Users\\tester\\AppData\\Roaming',
|
||||
'SubMiner',
|
||||
'logs',
|
||||
`mpv-${today}.log`,
|
||||
),
|
||||
path.join('C:\\Users\\tester\\AppData\\Roaming', 'SubMiner', 'logs', `mpv-${today}.log`),
|
||||
),
|
||||
);
|
||||
});
|
||||
@@ -33,12 +28,6 @@ test('getDefaultLauncherLogFile uses launcher prefix', () => {
|
||||
|
||||
assert.equal(
|
||||
resolved,
|
||||
path.join(
|
||||
'/home/tester',
|
||||
'.config',
|
||||
'SubMiner',
|
||||
'logs',
|
||||
`launcher-${today}.log`,
|
||||
),
|
||||
path.join('/home/tester', '.config', 'SubMiner', 'logs', `launcher-${today}.log`),
|
||||
);
|
||||
});
|
||||
|
||||
@@ -36,6 +36,8 @@ function withTempDir<T>(fn: (dir: string) => T): T {
|
||||
}
|
||||
}
|
||||
|
||||
const LAUNCHER_RUN_TIMEOUT_MS = 30000;
|
||||
|
||||
function runLauncher(argv: string[], env: NodeJS.ProcessEnv): RunResult {
|
||||
const result = spawnSync(
|
||||
process.execPath,
|
||||
@@ -43,6 +45,7 @@ function runLauncher(argv: string[], env: NodeJS.ProcessEnv): RunResult {
|
||||
{
|
||||
env,
|
||||
encoding: 'utf8',
|
||||
timeout: LAUNCHER_RUN_TIMEOUT_MS,
|
||||
},
|
||||
);
|
||||
return {
|
||||
@@ -269,10 +272,7 @@ ${bunBinary} -e "const net=require('node:net'); const fs=require('node:fs'); con
|
||||
SUBMINER_APPIMAGE_PATH: appPath,
|
||||
SUBMINER_TEST_MPV_ARGS: mpvArgsPath,
|
||||
};
|
||||
const result = runLauncher(
|
||||
['--args', '--pause=yes --title="movie night"', videoPath],
|
||||
env,
|
||||
);
|
||||
const result = runLauncher(['--args', '--pause=yes --title="movie night"', videoPath], env);
|
||||
|
||||
assert.equal(result.status, 0, `stdout:\n${result.stdout}\nstderr:\n${result.stderr}`);
|
||||
const argsFile = fs.readFileSync(mpvArgsPath, 'utf8');
|
||||
@@ -355,10 +355,7 @@ ${bunBinary} -e "const net=require('node:net'); const fs=require('node:fs'); con
|
||||
const result = runLauncher(['--log-level', 'debug', videoPath], env);
|
||||
|
||||
assert.equal(result.status, 0, `stdout:\n${result.stdout}\nstderr:\n${result.stderr}`);
|
||||
assert.match(
|
||||
fs.readFileSync(mpvArgsPath, 'utf8'),
|
||||
/--script-opts=.*subminer-log_level=debug/,
|
||||
);
|
||||
assert.match(fs.readFileSync(mpvArgsPath, 'utf8'), /--script-opts=.*subminer-log_level=debug/);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -427,7 +427,10 @@ function withFindAppBinaryEnvSandbox(run: () => void): void {
|
||||
}
|
||||
}
|
||||
|
||||
function withAccessSyncStub(isExecutablePath: (filePath: string) => boolean, run: () => void): void {
|
||||
function withAccessSyncStub(
|
||||
isExecutablePath: (filePath: string) => boolean,
|
||||
run: () => void,
|
||||
): void {
|
||||
const originalAccessSync = fs.accessSync;
|
||||
try {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
@@ -468,10 +471,13 @@ test('findAppBinary resolves /opt/SubMiner/SubMiner.AppImage when ~/.local/bin c
|
||||
try {
|
||||
os.homedir = () => baseDir;
|
||||
withFindAppBinaryEnvSandbox(() => {
|
||||
withAccessSyncStub((filePath) => filePath === '/opt/SubMiner/SubMiner.AppImage', () => {
|
||||
const result = findAppBinary('/some/other/path/subminer');
|
||||
assert.equal(result, '/opt/SubMiner/SubMiner.AppImage');
|
||||
});
|
||||
withAccessSyncStub(
|
||||
(filePath) => filePath === '/opt/SubMiner/SubMiner.AppImage',
|
||||
() => {
|
||||
const result = findAppBinary('/some/other/path/subminer');
|
||||
assert.equal(result, '/opt/SubMiner/SubMiner.AppImage');
|
||||
},
|
||||
);
|
||||
});
|
||||
} finally {
|
||||
os.homedir = originalHomedir;
|
||||
@@ -492,11 +498,14 @@ test('findAppBinary finds subminer on PATH when AppImage candidates do not exist
|
||||
process.env.PATH = `${binDir}${path.delimiter}${originalPath ?? ''}`;
|
||||
|
||||
withFindAppBinaryEnvSandbox(() => {
|
||||
withAccessSyncStub((filePath) => filePath === wrapperPath, () => {
|
||||
// selfPath must differ from wrapperPath so the self-check does not exclude it
|
||||
const result = findAppBinary(path.join(baseDir, 'launcher', 'subminer'));
|
||||
assert.equal(result, wrapperPath);
|
||||
});
|
||||
withAccessSyncStub(
|
||||
(filePath) => filePath === wrapperPath,
|
||||
() => {
|
||||
// selfPath must differ from wrapperPath so the self-check does not exclude it
|
||||
const result = findAppBinary(path.join(baseDir, 'launcher', 'subminer'));
|
||||
assert.equal(result, wrapperPath);
|
||||
},
|
||||
);
|
||||
});
|
||||
} finally {
|
||||
os.homedir = originalHomedir;
|
||||
|
||||
@@ -7,6 +7,7 @@ import type { LogLevel, Backend, Args, MpvTrack } from './types.js';
|
||||
import { DEFAULT_MPV_SUBMINER_ARGS, DEFAULT_YOUTUBE_YTDL_FORMAT } from './types.js';
|
||||
import { appendToAppLog, getAppLogPath, log, fail, getMpvLogPath } from './log.js';
|
||||
import { buildSubminerScriptOpts, resolveAniSkipMetadataForFile } from './aniskip-metadata.js';
|
||||
import { nowMs } from './time.js';
|
||||
import {
|
||||
commandExists,
|
||||
getPathEnv,
|
||||
@@ -47,7 +48,11 @@ export function parseMpvArgString(input: string): string[] {
|
||||
let inDoubleQuote = false;
|
||||
let escaping = false;
|
||||
const canEscape = (nextChar: string | undefined): boolean =>
|
||||
nextChar === undefined || nextChar === '"' || nextChar === "'" || nextChar === '\\' || /\s/.test(nextChar);
|
||||
nextChar === undefined ||
|
||||
nextChar === '"' ||
|
||||
nextChar === "'" ||
|
||||
nextChar === '\\' ||
|
||||
/\s/.test(nextChar);
|
||||
|
||||
for (let i = 0; i < chars.length; i += 1) {
|
||||
const ch = chars[i] || '';
|
||||
@@ -196,8 +201,8 @@ async function terminateTrackedDetachedMpv(logLevel: LogLevel): Promise<void> {
|
||||
return;
|
||||
}
|
||||
|
||||
const deadline = Date.now() + 1500;
|
||||
while (Date.now() < deadline) {
|
||||
const deadline = nowMs() + 1500;
|
||||
while (nowMs() < deadline) {
|
||||
if (!isProcessAlive(pid)) {
|
||||
clearTrackedDetachedMpvPid();
|
||||
return;
|
||||
@@ -340,7 +345,7 @@ export function sendMpvCommandWithResponse(
|
||||
timeoutMs = 5000,
|
||||
): Promise<unknown> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const requestId = Date.now() + Math.floor(Math.random() * 1000);
|
||||
const requestId = nowMs() + Math.floor(Math.random() * 1000);
|
||||
const socket = net.createConnection(socketPath);
|
||||
let buffer = '';
|
||||
|
||||
@@ -598,7 +603,9 @@ export async function startMpv(
|
||||
? await resolveAniSkipMetadataForFile(target)
|
||||
: null;
|
||||
const extraScriptOpts =
|
||||
targetKind === 'url' && isYoutubeTarget(target) && options?.disableYoutubeSubtitleAutoLoad === true
|
||||
targetKind === 'url' &&
|
||||
isYoutubeTarget(target) &&
|
||||
options?.disableYoutubeSubtitleAutoLoad === true
|
||||
? ['subminer-auto_start_pause_until_ready=no']
|
||||
: [];
|
||||
const scriptOpts = buildSubminerScriptOpts(
|
||||
@@ -1064,7 +1071,9 @@ export function launchMpvIdleDetached(
|
||||
mpvArgs.push(...parseMpvArgString(args.mpvArgs));
|
||||
}
|
||||
mpvArgs.push('--idle=yes');
|
||||
mpvArgs.push(`--script-opts=${buildSubminerScriptOpts(appPath, socketPath, null, args.logLevel)}`);
|
||||
mpvArgs.push(
|
||||
`--script-opts=${buildSubminerScriptOpts(appPath, socketPath, null, args.logLevel)}`,
|
||||
);
|
||||
mpvArgs.push(`--log-file=${getMpvLogPath()}`);
|
||||
mpvArgs.push(`--input-ipc-server=${socketPath}`);
|
||||
const mpvTarget = resolveCommandInvocation('mpv', mpvArgs);
|
||||
@@ -1109,8 +1118,8 @@ export async function waitForUnixSocketReady(
|
||||
socketPath: string,
|
||||
timeoutMs: number,
|
||||
): Promise<boolean> {
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const deadline = nowMs() + timeoutMs;
|
||||
while (nowMs() < deadline) {
|
||||
try {
|
||||
if (fs.existsSync(socketPath)) {
|
||||
const ready = await canConnectUnixSocket(socketPath);
|
||||
|
||||
8
launcher/time.ts
Normal file
8
launcher/time.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
export function nowMs(): number {
|
||||
const perf = globalThis.performance;
|
||||
if (perf) {
|
||||
return Math.floor(perf.timeOrigin + perf.now());
|
||||
}
|
||||
|
||||
return Number(process.hrtime.bigint() / 1000000n);
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import os from 'node:os';
|
||||
import { spawn } from 'node:child_process';
|
||||
import type { LogLevel, CommandExecOptions, CommandExecResult } from './types.js';
|
||||
import { log } from './log.js';
|
||||
import { nowMs } from './time.js';
|
||||
|
||||
export function sleep(ms: number): Promise<void> {
|
||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||
@@ -198,7 +199,7 @@ export function normalizeBasename(value: string, fallback: string): string {
|
||||
if (safe) return safe;
|
||||
const fallbackSafe = sanitizeToken(fallback);
|
||||
if (fallbackSafe) return fallbackSafe;
|
||||
return `${Date.now()}`;
|
||||
return `${nowMs()}`;
|
||||
}
|
||||
|
||||
export function normalizeLangCode(value: string): string {
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
export { generateYoutubeSubtitles, resolveWhisperBinary } from './youtube/orchestrator.js';
|
||||
@@ -1,84 +0,0 @@
|
||||
import fs from 'node:fs';
|
||||
import path from 'node:path';
|
||||
|
||||
import type { Args } from '../types.js';
|
||||
import { YOUTUBE_AUDIO_EXTENSIONS } from '../types.js';
|
||||
import { runExternalCommand } from '../util.js';
|
||||
|
||||
export function findAudioFile(tempDir: string, preferredExt: string): string | null {
|
||||
const entries = fs.readdirSync(tempDir);
|
||||
const audioFiles: Array<{ path: string; ext: string; mtimeMs: number }> = [];
|
||||
for (const name of entries) {
|
||||
const fullPath = path.join(tempDir, name);
|
||||
let stat: fs.Stats;
|
||||
try {
|
||||
stat = fs.statSync(fullPath);
|
||||
} catch {
|
||||
continue;
|
||||
}
|
||||
if (!stat.isFile()) continue;
|
||||
const ext = path.extname(name).toLowerCase();
|
||||
if (!YOUTUBE_AUDIO_EXTENSIONS.has(ext)) continue;
|
||||
audioFiles.push({ path: fullPath, ext, mtimeMs: stat.mtimeMs });
|
||||
}
|
||||
if (audioFiles.length === 0) return null;
|
||||
const preferred = audioFiles.find((entry) => entry.ext === `.${preferredExt.toLowerCase()}`);
|
||||
if (preferred) return preferred.path;
|
||||
audioFiles.sort((a, b) => b.mtimeMs - a.mtimeMs);
|
||||
return audioFiles[0]?.path ?? null;
|
||||
}
|
||||
|
||||
export async function convertAudioForWhisper(inputPath: string, tempDir: string): Promise<string> {
|
||||
const wavPath = path.join(tempDir, 'whisper-input.wav');
|
||||
await runExternalCommand('ffmpeg', [
|
||||
'-y',
|
||||
'-loglevel',
|
||||
'error',
|
||||
'-i',
|
||||
inputPath,
|
||||
'-ar',
|
||||
'16000',
|
||||
'-ac',
|
||||
'1',
|
||||
'-c:a',
|
||||
'pcm_s16le',
|
||||
wavPath,
|
||||
]);
|
||||
if (!fs.existsSync(wavPath)) {
|
||||
throw new Error(`Failed to prepare whisper audio input: ${wavPath}`);
|
||||
}
|
||||
return wavPath;
|
||||
}
|
||||
|
||||
export async function downloadYoutubeAudio(
|
||||
target: string,
|
||||
args: Args,
|
||||
tempDir: string,
|
||||
childTracker?: Set<ReturnType<typeof import('node:child_process').spawn>>,
|
||||
): Promise<string> {
|
||||
await runExternalCommand(
|
||||
'yt-dlp',
|
||||
[
|
||||
'-f',
|
||||
'bestaudio/best',
|
||||
'--extract-audio',
|
||||
'--audio-format',
|
||||
args.youtubeSubgenAudioFormat,
|
||||
'--no-warnings',
|
||||
'-o',
|
||||
path.join(tempDir, '%(id)s.%(ext)s'),
|
||||
target,
|
||||
],
|
||||
{
|
||||
logLevel: args.logLevel,
|
||||
commandLabel: 'yt-dlp:audio',
|
||||
streamOutput: true,
|
||||
},
|
||||
childTracker,
|
||||
);
|
||||
const audioPath = findAudioFile(tempDir, args.youtubeSubgenAudioFormat);
|
||||
if (!audioPath) {
|
||||
throw new Error('Audio extraction succeeded, but no audio file was found.');
|
||||
}
|
||||
return audioPath;
|
||||
}
|
||||
@@ -1,99 +0,0 @@
|
||||
import fs from 'node:fs';
|
||||
import path from 'node:path';
|
||||
|
||||
import type { SubtitleCandidate } from '../types.js';
|
||||
import { YOUTUBE_SUB_EXTENSIONS } from '../types.js';
|
||||
import { escapeRegExp, runExternalCommand } from '../util.js';
|
||||
|
||||
function filenameHasLanguageTag(filenameLower: string, langCode: string): boolean {
|
||||
const escaped = escapeRegExp(langCode);
|
||||
const pattern = new RegExp(`(^|[._-])${escaped}([._-]|$)`);
|
||||
return pattern.test(filenameLower);
|
||||
}
|
||||
|
||||
function classifyLanguage(
|
||||
filename: string,
|
||||
primaryLangCodes: string[],
|
||||
secondaryLangCodes: string[],
|
||||
): 'primary' | 'secondary' | null {
|
||||
const lower = filename.toLowerCase();
|
||||
const primary = primaryLangCodes.some((code) => filenameHasLanguageTag(lower, code));
|
||||
const secondary = secondaryLangCodes.some((code) => filenameHasLanguageTag(lower, code));
|
||||
if (primary && !secondary) return 'primary';
|
||||
if (secondary && !primary) return 'secondary';
|
||||
return null;
|
||||
}
|
||||
|
||||
export function toYtdlpLangPattern(langCodes: string[]): string {
|
||||
return langCodes.map((lang) => `${lang}.*`).join(',');
|
||||
}
|
||||
|
||||
export function scanSubtitleCandidates(
|
||||
tempDir: string,
|
||||
knownSet: Set<string>,
|
||||
source: SubtitleCandidate['source'],
|
||||
primaryLangCodes: string[],
|
||||
secondaryLangCodes: string[],
|
||||
): SubtitleCandidate[] {
|
||||
const entries = fs.readdirSync(tempDir);
|
||||
const out: SubtitleCandidate[] = [];
|
||||
for (const name of entries) {
|
||||
const fullPath = path.join(tempDir, name);
|
||||
if (knownSet.has(fullPath)) continue;
|
||||
let stat: fs.Stats;
|
||||
try {
|
||||
stat = fs.statSync(fullPath);
|
||||
} catch {
|
||||
continue;
|
||||
}
|
||||
if (!stat.isFile()) continue;
|
||||
const ext = path.extname(fullPath).toLowerCase();
|
||||
if (!YOUTUBE_SUB_EXTENSIONS.has(ext)) continue;
|
||||
const lang = classifyLanguage(name, primaryLangCodes, secondaryLangCodes);
|
||||
if (!lang) continue;
|
||||
out.push({ path: fullPath, lang, ext, size: stat.size, source });
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
export function pickBestCandidate(candidates: SubtitleCandidate[]): SubtitleCandidate | null {
|
||||
if (candidates.length === 0) return null;
|
||||
const scored = [...candidates].sort((a, b) => {
|
||||
const srtA = a.ext === '.srt' ? 1 : 0;
|
||||
const srtB = b.ext === '.srt' ? 1 : 0;
|
||||
if (srtA !== srtB) return srtB - srtA;
|
||||
return b.size - a.size;
|
||||
});
|
||||
return scored[0] ?? null;
|
||||
}
|
||||
|
||||
export async function downloadManualSubtitles(
|
||||
target: string,
|
||||
tempDir: string,
|
||||
langPattern: string,
|
||||
logLevel: import('../types.js').LogLevel,
|
||||
childTracker?: Set<ReturnType<typeof import('node:child_process').spawn>>,
|
||||
): Promise<void> {
|
||||
await runExternalCommand(
|
||||
'yt-dlp',
|
||||
[
|
||||
'--skip-download',
|
||||
'--no-warnings',
|
||||
'--write-subs',
|
||||
'--sub-format',
|
||||
'srt/vtt/best',
|
||||
'--sub-langs',
|
||||
langPattern,
|
||||
'-o',
|
||||
path.join(tempDir, '%(id)s.%(ext)s'),
|
||||
target,
|
||||
],
|
||||
{
|
||||
allowFailure: true,
|
||||
logLevel,
|
||||
commandLabel: 'yt-dlp:manual-subs',
|
||||
streamOutput: true,
|
||||
},
|
||||
childTracker,
|
||||
);
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { planYoutubeSubtitleGeneration } from './orchestrator';
|
||||
|
||||
test('planYoutubeSubtitleGeneration prefers manual subtitles and never schedules auto-subs', () => {
|
||||
assert.deepEqual(
|
||||
planYoutubeSubtitleGeneration({
|
||||
hasPrimaryManualSubtitle: true,
|
||||
hasSecondaryManualSubtitle: false,
|
||||
secondaryCanTranslate: true,
|
||||
}),
|
||||
{
|
||||
fetchManualSubtitles: true,
|
||||
fetchAutoSubtitles: false,
|
||||
publishPrimaryManualSubtitle: false,
|
||||
publishSecondaryManualSubtitle: false,
|
||||
generatePrimarySubtitle: false,
|
||||
generateSecondarySubtitle: true,
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test('planYoutubeSubtitleGeneration generates only missing tracks', () => {
|
||||
assert.deepEqual(
|
||||
planYoutubeSubtitleGeneration({
|
||||
hasPrimaryManualSubtitle: false,
|
||||
hasSecondaryManualSubtitle: true,
|
||||
secondaryCanTranslate: true,
|
||||
}),
|
||||
{
|
||||
fetchManualSubtitles: true,
|
||||
fetchAutoSubtitles: false,
|
||||
publishPrimaryManualSubtitle: false,
|
||||
publishSecondaryManualSubtitle: false,
|
||||
generatePrimarySubtitle: true,
|
||||
generateSecondarySubtitle: false,
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test('planYoutubeSubtitleGeneration reuses manual tracks already present on the YouTube video', () => {
|
||||
assert.deepEqual(
|
||||
planYoutubeSubtitleGeneration({
|
||||
hasPrimaryManualSubtitle: true,
|
||||
hasSecondaryManualSubtitle: true,
|
||||
secondaryCanTranslate: true,
|
||||
}),
|
||||
{
|
||||
fetchManualSubtitles: true,
|
||||
fetchAutoSubtitles: false,
|
||||
publishPrimaryManualSubtitle: false,
|
||||
publishSecondaryManualSubtitle: false,
|
||||
generatePrimarySubtitle: false,
|
||||
generateSecondarySubtitle: false,
|
||||
},
|
||||
);
|
||||
});
|
||||
@@ -1,366 +0,0 @@
|
||||
import fs from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
|
||||
import type { Args, SubtitleCandidate, YoutubeSubgenOutputs } from '../types.js';
|
||||
import { log } from '../log.js';
|
||||
import {
|
||||
commandExists,
|
||||
normalizeBasename,
|
||||
resolvePathMaybe,
|
||||
runExternalCommand,
|
||||
uniqueNormalizedLangCodes,
|
||||
} from '../util.js';
|
||||
import { state } from '../mpv.js';
|
||||
import { downloadYoutubeAudio, convertAudioForWhisper } from './audio-extraction.js';
|
||||
import {
|
||||
downloadManualSubtitles,
|
||||
pickBestCandidate,
|
||||
scanSubtitleCandidates,
|
||||
toYtdlpLangPattern,
|
||||
} from './manual-subs.js';
|
||||
import { runLoggedYoutubePhase } from './progress.js';
|
||||
import { fixSubtitleWithAi } from './subtitle-fix-ai.js';
|
||||
import { runWhisper } from './whisper.js';
|
||||
|
||||
export interface YoutubeSubtitleGenerationPlan {
|
||||
fetchManualSubtitles: true;
|
||||
fetchAutoSubtitles: false;
|
||||
publishPrimaryManualSubtitle: false;
|
||||
publishSecondaryManualSubtitle: false;
|
||||
generatePrimarySubtitle: boolean;
|
||||
generateSecondarySubtitle: boolean;
|
||||
}
|
||||
|
||||
export function planYoutubeSubtitleGeneration(input: {
|
||||
hasPrimaryManualSubtitle: boolean;
|
||||
hasSecondaryManualSubtitle: boolean;
|
||||
secondaryCanTranslate: boolean;
|
||||
}): YoutubeSubtitleGenerationPlan {
|
||||
return {
|
||||
fetchManualSubtitles: true,
|
||||
fetchAutoSubtitles: false,
|
||||
publishPrimaryManualSubtitle: false,
|
||||
publishSecondaryManualSubtitle: false,
|
||||
generatePrimarySubtitle: !input.hasPrimaryManualSubtitle,
|
||||
generateSecondarySubtitle: !input.hasSecondaryManualSubtitle && input.secondaryCanTranslate,
|
||||
};
|
||||
}
|
||||
|
||||
function preferredLangLabel(langCodes: string[], fallback: string): string {
|
||||
return uniqueNormalizedLangCodes(langCodes)[0] || fallback;
|
||||
}
|
||||
|
||||
function sourceTag(source: SubtitleCandidate['source']): string {
|
||||
return source;
|
||||
}
|
||||
|
||||
export function resolveWhisperBinary(args: Args): string | null {
|
||||
const explicit = args.whisperBin.trim();
|
||||
if (explicit) return resolvePathMaybe(explicit);
|
||||
if (commandExists('whisper-cli')) return 'whisper-cli';
|
||||
return null;
|
||||
}
|
||||
|
||||
async function maybeFixSubtitleWithAi(
|
||||
selectedPath: string,
|
||||
args: Args,
|
||||
expectedLanguage?: string,
|
||||
): Promise<string> {
|
||||
if (!args.youtubeFixWithAi || args.aiConfig.enabled !== true) {
|
||||
return selectedPath;
|
||||
}
|
||||
const fixedContent = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: `Starting AI subtitle fix: ${path.basename(selectedPath)}`,
|
||||
finishMessage: `Finished AI subtitle fix: ${path.basename(selectedPath)}`,
|
||||
failureMessage: `AI subtitle fix failed: ${path.basename(selectedPath)}`,
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
async () => {
|
||||
const originalContent = fs.readFileSync(selectedPath, 'utf8');
|
||||
return fixSubtitleWithAi(
|
||||
originalContent,
|
||||
args.aiConfig,
|
||||
(message) => {
|
||||
log('warn', args.logLevel, message);
|
||||
},
|
||||
expectedLanguage,
|
||||
);
|
||||
},
|
||||
);
|
||||
if (!fixedContent) {
|
||||
return selectedPath;
|
||||
}
|
||||
|
||||
const fixedPath = selectedPath.replace(/\.srt$/i, '.fixed.srt');
|
||||
fs.writeFileSync(fixedPath, fixedContent, 'utf8');
|
||||
return fixedPath;
|
||||
}
|
||||
|
||||
export async function generateYoutubeSubtitles(
|
||||
target: string,
|
||||
args: Args,
|
||||
onReady?: (lang: 'primary' | 'secondary', pathToLoad: string) => Promise<void>,
|
||||
): Promise<YoutubeSubgenOutputs> {
|
||||
const outDir = path.resolve(resolvePathMaybe(args.youtubeSubgenOutDir));
|
||||
fs.mkdirSync(outDir, { recursive: true });
|
||||
|
||||
const primaryLangCodes = uniqueNormalizedLangCodes(args.youtubePrimarySubLangs);
|
||||
const secondaryLangCodes = uniqueNormalizedLangCodes(args.youtubeSecondarySubLangs);
|
||||
const primaryLabel = preferredLangLabel(primaryLangCodes, 'primary');
|
||||
const secondaryLabel = preferredLangLabel(secondaryLangCodes, 'secondary');
|
||||
const secondaryCanUseWhisperTranslate =
|
||||
secondaryLangCodes.includes('en') || secondaryLangCodes.includes('eng');
|
||||
const manualLangs = toYtdlpLangPattern([...primaryLangCodes, ...secondaryLangCodes]);
|
||||
|
||||
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'subminer-yt-subgen-'));
|
||||
const knownFiles = new Set<string>();
|
||||
let keepTemp = args.youtubeSubgenKeepTemp;
|
||||
|
||||
const publishTrack = async (
|
||||
lang: 'primary' | 'secondary',
|
||||
source: SubtitleCandidate['source'],
|
||||
selectedPath: string,
|
||||
basename: string,
|
||||
): Promise<string> => {
|
||||
const langLabel = lang === 'primary' ? primaryLabel : secondaryLabel;
|
||||
const taggedPath = path.join(outDir, `${basename}.${langLabel}.${sourceTag(source)}.srt`);
|
||||
const aliasPath = path.join(outDir, `${basename}.${langLabel}.srt`);
|
||||
fs.copyFileSync(selectedPath, taggedPath);
|
||||
fs.copyFileSync(taggedPath, aliasPath);
|
||||
log('info', args.logLevel, `Generated subtitle (${langLabel}, ${source}) -> ${aliasPath}`);
|
||||
if (onReady) await onReady(lang, aliasPath);
|
||||
return aliasPath;
|
||||
};
|
||||
|
||||
try {
|
||||
const meta = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: 'Starting YouTube metadata probe',
|
||||
finishMessage: 'Finished YouTube metadata probe',
|
||||
failureMessage: 'YouTube metadata probe failed',
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
() =>
|
||||
runExternalCommand(
|
||||
'yt-dlp',
|
||||
['--dump-single-json', '--no-warnings', target],
|
||||
{
|
||||
captureStdout: true,
|
||||
logLevel: args.logLevel,
|
||||
commandLabel: 'yt-dlp:meta',
|
||||
},
|
||||
state.youtubeSubgenChildren,
|
||||
),
|
||||
);
|
||||
const metadata = JSON.parse(meta.stdout) as { id?: string };
|
||||
const videoId = metadata.id || `${Date.now()}`;
|
||||
const basename = normalizeBasename(videoId, videoId);
|
||||
|
||||
await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: `Starting manual subtitle probe (${manualLangs || 'requested langs'})`,
|
||||
finishMessage: 'Finished manual subtitle probe',
|
||||
failureMessage: 'Manual subtitle probe failed',
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
() =>
|
||||
downloadManualSubtitles(
|
||||
target,
|
||||
tempDir,
|
||||
manualLangs,
|
||||
args.logLevel,
|
||||
state.youtubeSubgenChildren,
|
||||
),
|
||||
);
|
||||
|
||||
const manualSubs = scanSubtitleCandidates(
|
||||
tempDir,
|
||||
knownFiles,
|
||||
'manual',
|
||||
primaryLangCodes,
|
||||
secondaryLangCodes,
|
||||
);
|
||||
for (const sub of manualSubs) knownFiles.add(sub.path);
|
||||
const selectedPrimary = pickBestCandidate(
|
||||
manualSubs.filter((entry) => entry.lang === 'primary'),
|
||||
);
|
||||
const selectedSecondary = pickBestCandidate(
|
||||
manualSubs.filter((entry) => entry.lang === 'secondary'),
|
||||
);
|
||||
|
||||
const plan = planYoutubeSubtitleGeneration({
|
||||
hasPrimaryManualSubtitle: Boolean(selectedPrimary),
|
||||
hasSecondaryManualSubtitle: Boolean(selectedSecondary),
|
||||
secondaryCanTranslate: secondaryCanUseWhisperTranslate,
|
||||
});
|
||||
|
||||
let primaryAlias = '';
|
||||
let secondaryAlias = '';
|
||||
|
||||
if (selectedPrimary) {
|
||||
log(
|
||||
'info',
|
||||
args.logLevel,
|
||||
`Using native YouTube subtitle track for primary (${primaryLabel}); skipping external subtitle copy.`,
|
||||
);
|
||||
}
|
||||
if (selectedSecondary) {
|
||||
log(
|
||||
'info',
|
||||
args.logLevel,
|
||||
`Using native YouTube subtitle track for secondary (${secondaryLabel}); skipping external subtitle copy.`,
|
||||
);
|
||||
}
|
||||
|
||||
if (plan.generatePrimarySubtitle || plan.generateSecondarySubtitle) {
|
||||
const whisperBin = resolveWhisperBinary(args);
|
||||
const modelPath = args.whisperModel.trim()
|
||||
? path.resolve(resolvePathMaybe(args.whisperModel.trim()))
|
||||
: '';
|
||||
const hasWhisperFallback = !!whisperBin && !!modelPath && fs.existsSync(modelPath);
|
||||
|
||||
if (!hasWhisperFallback) {
|
||||
log(
|
||||
'warn',
|
||||
args.logLevel,
|
||||
'Whisper fallback is not configured; continuing with available subtitle tracks.',
|
||||
);
|
||||
} else {
|
||||
const audioPath = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: 'Starting fallback audio extraction for subtitle generation',
|
||||
finishMessage: 'Finished fallback audio extraction',
|
||||
failureMessage: 'Fallback audio extraction failed',
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
() => downloadYoutubeAudio(target, args, tempDir, state.youtubeSubgenChildren),
|
||||
);
|
||||
const whisperAudioPath = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: 'Starting ffmpeg audio prep for whisper',
|
||||
finishMessage: 'Finished ffmpeg audio prep for whisper',
|
||||
failureMessage: 'ffmpeg audio prep for whisper failed',
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
() => convertAudioForWhisper(audioPath, tempDir),
|
||||
);
|
||||
|
||||
if (plan.generatePrimarySubtitle) {
|
||||
try {
|
||||
const primaryPrefix = path.join(tempDir, `${basename}.${primaryLabel}`);
|
||||
const primarySrt = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: `Starting whisper primary subtitle generation (${primaryLabel})`,
|
||||
finishMessage: `Finished whisper primary subtitle generation (${primaryLabel})`,
|
||||
failureMessage: `Whisper primary subtitle generation failed (${primaryLabel})`,
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
() =>
|
||||
runWhisper(whisperBin!, args, {
|
||||
modelPath,
|
||||
audioPath: whisperAudioPath,
|
||||
language: args.youtubeWhisperSourceLanguage,
|
||||
translate: false,
|
||||
outputPrefix: primaryPrefix,
|
||||
}),
|
||||
);
|
||||
const fixedPrimary = await maybeFixSubtitleWithAi(
|
||||
primarySrt,
|
||||
args,
|
||||
args.youtubeWhisperSourceLanguage,
|
||||
);
|
||||
primaryAlias = await publishTrack(
|
||||
'primary',
|
||||
fixedPrimary === primarySrt ? 'whisper' : 'whisper-fixed',
|
||||
fixedPrimary,
|
||||
basename,
|
||||
);
|
||||
} catch (error) {
|
||||
log(
|
||||
'warn',
|
||||
args.logLevel,
|
||||
`Failed to generate primary subtitle via whisper fallback: ${(error as Error).message}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (plan.generateSecondarySubtitle) {
|
||||
try {
|
||||
const secondaryPrefix = path.join(tempDir, `${basename}.${secondaryLabel}`);
|
||||
const secondarySrt = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: `Starting whisper secondary subtitle generation (${secondaryLabel})`,
|
||||
finishMessage: `Finished whisper secondary subtitle generation (${secondaryLabel})`,
|
||||
failureMessage: `Whisper secondary subtitle generation failed (${secondaryLabel})`,
|
||||
log: (level, message) => log(level, args.logLevel, message),
|
||||
},
|
||||
() =>
|
||||
runWhisper(whisperBin!, args, {
|
||||
modelPath,
|
||||
audioPath: whisperAudioPath,
|
||||
language: args.youtubeWhisperSourceLanguage,
|
||||
translate: true,
|
||||
outputPrefix: secondaryPrefix,
|
||||
}),
|
||||
);
|
||||
const fixedSecondary = await maybeFixSubtitleWithAi(secondarySrt, args);
|
||||
secondaryAlias = await publishTrack(
|
||||
'secondary',
|
||||
fixedSecondary === secondarySrt ? 'whisper-translate' : 'whisper-translate-fixed',
|
||||
fixedSecondary,
|
||||
basename,
|
||||
);
|
||||
} catch (error) {
|
||||
log(
|
||||
'warn',
|
||||
args.logLevel,
|
||||
`Failed to generate secondary subtitle via whisper fallback: ${(error as Error).message}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!secondaryCanUseWhisperTranslate && !selectedSecondary) {
|
||||
log(
|
||||
'warn',
|
||||
args.logLevel,
|
||||
`Secondary subtitle language (${secondaryLabel}) has no whisper translate fallback; relying on manual subtitles only.`,
|
||||
);
|
||||
}
|
||||
|
||||
if (!primaryAlias && !secondaryAlias && !selectedPrimary && !selectedSecondary) {
|
||||
throw new Error('Failed to generate any subtitle tracks.');
|
||||
}
|
||||
if ((!primaryAlias && !selectedPrimary) || (!secondaryAlias && !selectedSecondary)) {
|
||||
log(
|
||||
'warn',
|
||||
args.logLevel,
|
||||
`Generated partial subtitle result: primary=${primaryAlias || selectedPrimary ? 'ok' : 'missing'}, secondary=${secondaryAlias || selectedSecondary ? 'ok' : 'missing'}`,
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
basename,
|
||||
primaryPath: primaryAlias || undefined,
|
||||
secondaryPath: secondaryAlias || undefined,
|
||||
primaryNative: Boolean(selectedPrimary),
|
||||
secondaryNative: Boolean(selectedSecondary),
|
||||
};
|
||||
} catch (error) {
|
||||
keepTemp = true;
|
||||
throw error;
|
||||
} finally {
|
||||
if (keepTemp) {
|
||||
log('warn', args.logLevel, `Keeping subtitle temp dir: ${tempDir}`);
|
||||
} else {
|
||||
try {
|
||||
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// ignore cleanup failures
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { runLoggedYoutubePhase } from './progress';
|
||||
|
||||
test('runLoggedYoutubePhase logs start and finish with elapsed time', async () => {
|
||||
const entries: Array<{ level: 'info' | 'warn'; message: string }> = [];
|
||||
let nowMs = 1_000;
|
||||
|
||||
const result = await runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: 'Starting subtitle probe',
|
||||
finishMessage: 'Finished subtitle probe',
|
||||
log: (level, message) => entries.push({ level, message }),
|
||||
now: () => nowMs,
|
||||
},
|
||||
async () => {
|
||||
nowMs = 2_500;
|
||||
return 'ok';
|
||||
},
|
||||
);
|
||||
|
||||
assert.equal(result, 'ok');
|
||||
assert.deepEqual(entries, [
|
||||
{ level: 'info', message: 'Starting subtitle probe' },
|
||||
{ level: 'info', message: 'Finished subtitle probe (1.5s)' },
|
||||
]);
|
||||
});
|
||||
|
||||
test('runLoggedYoutubePhase logs failure with elapsed time and rethrows', async () => {
|
||||
const entries: Array<{ level: 'info' | 'warn'; message: string }> = [];
|
||||
let nowMs = 5_000;
|
||||
|
||||
await assert.rejects(
|
||||
runLoggedYoutubePhase(
|
||||
{
|
||||
startMessage: 'Starting whisper primary',
|
||||
finishMessage: 'Finished whisper primary',
|
||||
failureMessage: 'Failed whisper primary',
|
||||
log: (level, message) => entries.push({ level, message }),
|
||||
now: () => nowMs,
|
||||
},
|
||||
async () => {
|
||||
nowMs = 8_200;
|
||||
throw new Error('boom');
|
||||
},
|
||||
),
|
||||
/boom/,
|
||||
);
|
||||
|
||||
assert.deepEqual(entries, [
|
||||
{ level: 'info', message: 'Starting whisper primary' },
|
||||
{ level: 'warn', message: 'Failed whisper primary after 3.2s: boom' },
|
||||
]);
|
||||
});
|
||||
@@ -1,33 +0,0 @@
|
||||
type PhaseLogLevel = 'info' | 'warn';
|
||||
|
||||
export interface RunLoggedYoutubePhaseOptions {
|
||||
startMessage: string;
|
||||
finishMessage: string;
|
||||
failureMessage?: string;
|
||||
log: (level: PhaseLogLevel, message: string) => void;
|
||||
now?: () => number;
|
||||
}
|
||||
|
||||
function formatElapsedMs(elapsedMs: number): string {
|
||||
const seconds = Math.max(0, elapsedMs) / 1000;
|
||||
return `${seconds.toFixed(1)}s`;
|
||||
}
|
||||
|
||||
export async function runLoggedYoutubePhase<T>(
|
||||
options: RunLoggedYoutubePhaseOptions,
|
||||
run: () => Promise<T>,
|
||||
): Promise<T> {
|
||||
const now = options.now ?? Date.now;
|
||||
const startedAt = now();
|
||||
options.log('info', options.startMessage);
|
||||
try {
|
||||
const result = await run();
|
||||
options.log('info', `${options.finishMessage} (${formatElapsedMs(now() - startedAt)})`);
|
||||
return result;
|
||||
} catch (error) {
|
||||
const prefix = options.failureMessage ?? options.finishMessage;
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
options.log('warn', `${prefix} after ${formatElapsedMs(now() - startedAt)}: ${message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { parseSrt, stringifySrt } from './srt';
|
||||
|
||||
test('parseSrt reads cue numbering timing and text', () => {
|
||||
const cues = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんにちは
|
||||
|
||||
2
|
||||
00:00:02,500 --> 00:00:03,000
|
||||
世界
|
||||
`);
|
||||
|
||||
assert.equal(cues.length, 2);
|
||||
assert.equal(cues[0]?.start, '00:00:01,000');
|
||||
assert.equal(cues[0]?.end, '00:00:02,000');
|
||||
assert.equal(cues[0]?.text, 'こんにちは');
|
||||
assert.equal(cues[1]?.text, '世界');
|
||||
});
|
||||
|
||||
test('stringifySrt preserves parseable cue structure', () => {
|
||||
const roundTrip = stringifySrt(
|
||||
parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんにちは
|
||||
`),
|
||||
);
|
||||
|
||||
assert.match(roundTrip, /1\n00:00:01,000 --> 00:00:02,000\nこんにちは/);
|
||||
});
|
||||
@@ -1,40 +0,0 @@
|
||||
export interface SrtCue {
|
||||
index: number;
|
||||
start: string;
|
||||
end: string;
|
||||
text: string;
|
||||
}
|
||||
|
||||
const TIMING_LINE_PATTERN =
|
||||
/^(?<start>\d{2}:\d{2}:\d{2},\d{3}) --> (?<end>\d{2}:\d{2}:\d{2},\d{3})$/;
|
||||
|
||||
export function parseSrt(content: string): SrtCue[] {
|
||||
const normalized = content.replace(/\r\n/g, '\n').trim();
|
||||
if (!normalized) return [];
|
||||
|
||||
return normalized
|
||||
.split(/\n{2,}/)
|
||||
.map((block) => {
|
||||
const lines = block.split('\n');
|
||||
const index = Number.parseInt(lines[0] || '', 10);
|
||||
const timingLine = lines[1] || '';
|
||||
const timingMatch = TIMING_LINE_PATTERN.exec(timingLine);
|
||||
if (!Number.isInteger(index) || !timingMatch?.groups) {
|
||||
throw new Error(`Invalid SRT cue block: ${block}`);
|
||||
}
|
||||
return {
|
||||
index,
|
||||
start: timingMatch.groups.start!,
|
||||
end: timingMatch.groups.end!,
|
||||
text: lines.slice(2).join('\n').trim(),
|
||||
} satisfies SrtCue;
|
||||
})
|
||||
.filter((cue) => cue.text.length > 0);
|
||||
}
|
||||
|
||||
export function stringifySrt(cues: SrtCue[]): string {
|
||||
return cues
|
||||
.map((cue, idx) => `${idx + 1}\n${cue.start} --> ${cue.end}\n${cue.text.trim()}\n`)
|
||||
.join('\n')
|
||||
.trimEnd();
|
||||
}
|
||||
@@ -1,126 +0,0 @@
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { applyFixedCueBatch, parseAiSubtitleFixResponse } from './subtitle-fix-ai';
|
||||
import { parseSrt } from './srt';
|
||||
|
||||
test('applyFixedCueBatch accepts content-only fixes with identical timing', () => {
|
||||
const original = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんいちは
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
世界
|
||||
`);
|
||||
const fixed = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんにちは
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
世界
|
||||
`);
|
||||
|
||||
const merged = applyFixedCueBatch(original, fixed);
|
||||
assert.equal(merged[0]?.text, 'こんにちは');
|
||||
});
|
||||
|
||||
test('applyFixedCueBatch rejects changed timestamps', () => {
|
||||
const original = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんいちは
|
||||
`);
|
||||
const fixed = parseSrt(`1
|
||||
00:00:01,100 --> 00:00:02,000
|
||||
こんにちは
|
||||
`);
|
||||
|
||||
assert.throws(() => applyFixedCueBatch(original, fixed), /timestamps/i);
|
||||
});
|
||||
|
||||
test('parseAiSubtitleFixResponse accepts valid SRT wrapped in markdown fences', () => {
|
||||
const original = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんいちは
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
世界
|
||||
`);
|
||||
|
||||
const parsed = parseAiSubtitleFixResponse(
|
||||
original,
|
||||
'```srt\n1\n00:00:01,000 --> 00:00:02,000\nこんにちは\n\n2\n00:00:03,000 --> 00:00:04,000\n世界\n```',
|
||||
);
|
||||
|
||||
assert.equal(parsed[0]?.text, 'こんにちは');
|
||||
assert.equal(parsed[1]?.text, '世界');
|
||||
});
|
||||
|
||||
test('parseAiSubtitleFixResponse accepts text-only one-block-per-cue output', () => {
|
||||
const original = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんいちは
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
世界
|
||||
`);
|
||||
|
||||
const parsed = parseAiSubtitleFixResponse(
|
||||
original,
|
||||
`こんにちは
|
||||
|
||||
世界`,
|
||||
);
|
||||
|
||||
assert.equal(parsed[0]?.start, '00:00:01,000');
|
||||
assert.equal(parsed[0]?.text, 'こんにちは');
|
||||
assert.equal(parsed[1]?.end, '00:00:04,000');
|
||||
assert.equal(parsed[1]?.text, '世界');
|
||||
});
|
||||
|
||||
test('parseAiSubtitleFixResponse rejects unrecoverable text-only output', () => {
|
||||
const original = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんいちは
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
世界
|
||||
`);
|
||||
|
||||
assert.throws(
|
||||
() => parseAiSubtitleFixResponse(original, 'こんにちは\n世界\n余分です'),
|
||||
/cue block|cue count/i,
|
||||
);
|
||||
});
|
||||
|
||||
test('parseAiSubtitleFixResponse rejects language drift for primary Japanese subtitles', () => {
|
||||
const original = parseSrt(`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
こんにちは
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
今日はいい天気ですね
|
||||
`);
|
||||
|
||||
assert.throws(
|
||||
() =>
|
||||
parseAiSubtitleFixResponse(
|
||||
original,
|
||||
`1
|
||||
00:00:01,000 --> 00:00:02,000
|
||||
Hello
|
||||
|
||||
2
|
||||
00:00:03,000 --> 00:00:04,000
|
||||
The weather is nice today
|
||||
`,
|
||||
'ja',
|
||||
),
|
||||
/language/i,
|
||||
);
|
||||
});
|
||||
@@ -1,213 +0,0 @@
|
||||
import type { LauncherAiConfig } from '../types.js';
|
||||
import { requestAiChatCompletion, resolveAiApiKey } from '../../src/ai/client.js';
|
||||
import { parseSrt, stringifySrt, type SrtCue } from './srt.js';
|
||||
|
||||
const DEFAULT_SUBTITLE_FIX_PROMPT =
|
||||
'Fix transcription mistakes only. Preserve cue numbering, timestamps, and valid SRT formatting exactly. Return only corrected SRT.';
|
||||
|
||||
const SRT_BLOCK_PATTERN =
|
||||
/(?:^|\n)(\d+\n\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}[\s\S]*)$/;
|
||||
const CODE_FENCE_PATTERN = /^```(?:\w+)?\s*\n([\s\S]*?)\n```$/;
|
||||
const JAPANESE_CHAR_PATTERN = /[\p{Script=Hiragana}\p{Script=Katakana}\p{Script=Han}]/gu;
|
||||
const LATIN_LETTER_PATTERN = /\p{Script=Latin}/gu;
|
||||
|
||||
export function applyFixedCueBatch(original: SrtCue[], fixed: SrtCue[]): SrtCue[] {
|
||||
if (original.length !== fixed.length) {
|
||||
throw new Error('Fixed subtitle batch must preserve cue count.');
|
||||
}
|
||||
|
||||
return original.map((cue, index) => {
|
||||
const nextCue = fixed[index];
|
||||
if (!nextCue) {
|
||||
throw new Error('Missing fixed subtitle cue.');
|
||||
}
|
||||
if (cue.start !== nextCue.start || cue.end !== nextCue.end) {
|
||||
throw new Error('Fixed subtitle batch must preserve cue timestamps.');
|
||||
}
|
||||
return {
|
||||
...cue,
|
||||
text: nextCue.text,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
function chunkCues(cues: SrtCue[], size: number): SrtCue[][] {
|
||||
const chunks: SrtCue[][] = [];
|
||||
for (let index = 0; index < cues.length; index += size) {
|
||||
chunks.push(cues.slice(index, index + size));
|
||||
}
|
||||
return chunks;
|
||||
}
|
||||
|
||||
function normalizeAiSubtitleFixCandidates(content: string): string[] {
|
||||
const trimmed = content.replace(/\r\n/g, '\n').trim();
|
||||
if (!trimmed) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const candidates = new Set<string>([trimmed]);
|
||||
const fenced = CODE_FENCE_PATTERN.exec(trimmed)?.[1]?.trim();
|
||||
if (fenced) {
|
||||
candidates.add(fenced);
|
||||
}
|
||||
|
||||
const srtBlock = SRT_BLOCK_PATTERN.exec(trimmed)?.[1]?.trim();
|
||||
if (srtBlock) {
|
||||
candidates.add(srtBlock);
|
||||
}
|
||||
|
||||
return [...candidates];
|
||||
}
|
||||
|
||||
function parseTextOnlyCueBatch(original: SrtCue[], content: string): SrtCue[] {
|
||||
const paragraphBlocks = content
|
||||
.split(/\n{2,}/)
|
||||
.map((block) => block.trim())
|
||||
.filter((block) => block.length > 0);
|
||||
if (paragraphBlocks.length === original.length) {
|
||||
return original.map((cue, index) => ({
|
||||
...cue,
|
||||
text: paragraphBlocks[index]!,
|
||||
}));
|
||||
}
|
||||
|
||||
const lineBlocks = content
|
||||
.split('\n')
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0);
|
||||
if (lineBlocks.length === original.length) {
|
||||
return original.map((cue, index) => ({
|
||||
...cue,
|
||||
text: lineBlocks[index]!,
|
||||
}));
|
||||
}
|
||||
|
||||
throw new Error('Fixed subtitle batch must preserve cue count.');
|
||||
}
|
||||
|
||||
function countPatternMatches(content: string, pattern: RegExp): number {
|
||||
pattern.lastIndex = 0;
|
||||
return [...content.matchAll(pattern)].length;
|
||||
}
|
||||
|
||||
function isJapaneseLanguageCode(language: string | undefined): boolean {
|
||||
if (!language) return false;
|
||||
const normalized = language.trim().toLowerCase();
|
||||
return normalized === 'ja' || normalized === 'jp' || normalized === 'jpn';
|
||||
}
|
||||
|
||||
function validateExpectedLanguage(
|
||||
original: SrtCue[],
|
||||
fixed: SrtCue[],
|
||||
expectedLanguage: string | undefined,
|
||||
): void {
|
||||
if (!isJapaneseLanguageCode(expectedLanguage)) return;
|
||||
|
||||
const originalText = original.map((cue) => cue.text).join('\n');
|
||||
const fixedText = fixed.map((cue) => cue.text).join('\n');
|
||||
const originalJapaneseChars = countPatternMatches(originalText, JAPANESE_CHAR_PATTERN);
|
||||
if (originalJapaneseChars < 4) return;
|
||||
|
||||
const fixedJapaneseChars = countPatternMatches(fixedText, JAPANESE_CHAR_PATTERN);
|
||||
const fixedLatinLetters = countPatternMatches(fixedText, LATIN_LETTER_PATTERN);
|
||||
if (fixedJapaneseChars === 0 && fixedLatinLetters >= 4) {
|
||||
throw new Error('Fixed subtitle batch changed language away from expected Japanese.');
|
||||
}
|
||||
}
|
||||
|
||||
export function parseAiSubtitleFixResponse(
|
||||
original: SrtCue[],
|
||||
content: string,
|
||||
expectedLanguage?: string,
|
||||
): SrtCue[] {
|
||||
const candidates = normalizeAiSubtitleFixCandidates(content);
|
||||
let lastError: Error | null = null;
|
||||
|
||||
for (const candidate of candidates) {
|
||||
try {
|
||||
const parsed = parseSrt(candidate);
|
||||
validateExpectedLanguage(original, parsed, expectedLanguage);
|
||||
return parsed;
|
||||
} catch (error) {
|
||||
lastError = error as Error;
|
||||
}
|
||||
}
|
||||
|
||||
for (const candidate of candidates) {
|
||||
try {
|
||||
const parsed = parseTextOnlyCueBatch(original, candidate);
|
||||
validateExpectedLanguage(original, parsed, expectedLanguage);
|
||||
return parsed;
|
||||
} catch (error) {
|
||||
lastError = error as Error;
|
||||
}
|
||||
}
|
||||
|
||||
throw lastError ?? new Error('AI subtitle fix returned empty content.');
|
||||
}
|
||||
|
||||
export async function fixSubtitleWithAi(
|
||||
subtitleContent: string,
|
||||
aiConfig: LauncherAiConfig,
|
||||
logWarning: (message: string) => void,
|
||||
expectedLanguage?: string,
|
||||
): Promise<string | null> {
|
||||
if (aiConfig.enabled !== true) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const apiKey = await resolveAiApiKey(aiConfig);
|
||||
if (!apiKey) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const cues = parseSrt(subtitleContent);
|
||||
if (cues.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const fixedChunks: SrtCue[] = [];
|
||||
for (const chunk of chunkCues(cues, 25)) {
|
||||
const fixedContent = await requestAiChatCompletion(
|
||||
{
|
||||
apiKey,
|
||||
baseUrl: aiConfig.baseUrl,
|
||||
model: aiConfig.model,
|
||||
timeoutMs: aiConfig.requestTimeoutMs,
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
content: aiConfig.systemPrompt?.trim() || DEFAULT_SUBTITLE_FIX_PROMPT,
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: stringifySrt(chunk),
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
logWarning,
|
||||
},
|
||||
);
|
||||
if (!fixedContent) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let parsedFixed: SrtCue[];
|
||||
try {
|
||||
parsedFixed = parseAiSubtitleFixResponse(chunk, fixedContent, expectedLanguage);
|
||||
} catch (error) {
|
||||
logWarning(`AI subtitle fix returned invalid SRT: ${(error as Error).message}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
fixedChunks.push(...applyFixedCueBatch(chunk, parsedFixed));
|
||||
} catch (error) {
|
||||
logWarning(`AI subtitle fix validation failed: ${(error as Error).message}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
return stringifySrt(fixedChunks);
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { buildWhisperArgs } from './whisper';
|
||||
|
||||
test('buildWhisperArgs includes threads and optional VAD flags', () => {
|
||||
assert.deepEqual(
|
||||
buildWhisperArgs({
|
||||
modelPath: '/models/ggml-large-v2.bin',
|
||||
audioPath: '/tmp/input.wav',
|
||||
outputPrefix: '/tmp/output',
|
||||
language: 'ja',
|
||||
translate: false,
|
||||
threads: 8,
|
||||
vadModelPath: '/models/vad.bin',
|
||||
}),
|
||||
[
|
||||
'-m',
|
||||
'/models/ggml-large-v2.bin',
|
||||
'-f',
|
||||
'/tmp/input.wav',
|
||||
'--output-srt',
|
||||
'--output-file',
|
||||
'/tmp/output',
|
||||
'--language',
|
||||
'ja',
|
||||
'--threads',
|
||||
'8',
|
||||
'-vm',
|
||||
'/models/vad.bin',
|
||||
'--vad',
|
||||
],
|
||||
);
|
||||
});
|
||||
|
||||
test('buildWhisperArgs includes translate flag when requested', () => {
|
||||
assert.ok(
|
||||
buildWhisperArgs({
|
||||
modelPath: '/models/base.bin',
|
||||
audioPath: '/tmp/input.wav',
|
||||
outputPrefix: '/tmp/output',
|
||||
language: 'ja',
|
||||
translate: true,
|
||||
threads: 4,
|
||||
}).includes('--translate'),
|
||||
);
|
||||
});
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user