Repository: repomirrorhq/repomirror Branch: main Commit: e160c79740fc Files: 83 Total size: 666.0 KB Directory structure: gitextract_fi4hehdq/ ├── .eslintrc.js ├── .github/ │ └── workflows/ │ └── ci.yml ├── .gitignore ├── .repomirror/ │ ├── .gitignore │ ├── prompt.md │ ├── ralph.sh │ └── sync.sh ├── @IMPLEMENTATION_PLAN.md ├── README.md ├── VALIDATION_PLAN.md ├── assets/ │ ├── better-use.webm │ └── repomirror.webm ├── coverage/ │ ├── base.css │ ├── block-navigation.js │ ├── coverage-final.json │ ├── index.html │ ├── prettify.css │ ├── prettify.js │ ├── sorter.js │ └── src/ │ ├── cli.ts.html │ ├── commands/ │ │ ├── index.html │ │ ├── init.ts.html │ │ ├── sync-forever.ts.html │ │ ├── sync-one.ts.html │ │ ├── sync.ts.html │ │ └── visualize.ts.html │ └── index.html ├── docs/ │ └── remote-repo-design.md ├── hack/ │ ├── ralph-validate.sh │ ├── ralph.sh │ └── visualize.ts ├── package.json ├── prompt-validate.md ├── prompt.md ├── prompts/ │ ├── ai-sdk-python.md │ ├── assistant-ui-vue.md │ ├── better-use.md │ ├── open-convex.md │ ├── open-dedalus.md │ └── repomirror.md ├── repomirror.md ├── repomirror.yaml ├── specs/ │ ├── devtooling.md │ ├── github_actions.md │ └── repomirror.md ├── specs_deprecated_ignore/ │ ├── github_actions.md │ ├── remote_sync.md │ └── sync_check ├── src/ │ ├── cli.ts │ ├── commands/ │ │ ├── dispatch-sync.ts │ │ ├── github-actions.ts │ │ ├── init.ts │ │ ├── pull.ts │ │ ├── push.ts │ │ ├── remote.ts │ │ ├── setup-github-pr-sync.ts │ │ ├── sync-forever.ts │ │ ├── sync-one.ts │ │ ├── sync.ts │ │ └── visualize.ts │ └── templates/ │ ├── gitignore.template │ ├── ralph.sh.template │ └── sync.sh.template ├── test-resolve.js ├── tests/ │ ├── README.md │ ├── basic.test.ts │ ├── commands/ │ │ ├── dispatch-sync.test.ts │ │ ├── github-actions.test.ts │ │ ├── init.test.ts │ │ ├── pull.test.ts │ │ ├── push.test.ts │ │ ├── setup-github-pr-sync.test.ts │ │ ├── simple.test.ts │ │ ├── sync-forever.test.ts │ │ ├── sync-one.test.ts │ │ ├── sync.test.ts │ │ └── 
visualize.test.ts │ ├── helpers/ │ │ ├── fixtures.ts │ │ ├── index.ts │ │ └── test-utils.ts │ └── setup.ts ├── tsconfig.json └── vitest.config.ts ================================================ FILE CONTENTS ================================================ ================================================ FILE: .eslintrc.js ================================================ module.exports = { parser: '@typescript-eslint/parser', plugins: ['@typescript-eslint'], extends: [ 'eslint:recommended', ], parserOptions: { ecmaVersion: 2022, sourceType: 'module', }, env: { node: true, es2022: true, }, rules: { '@typescript-eslint/no-unused-vars': 'off', 'no-unused-vars': 'off', }, }; ================================================ FILE: .github/workflows/ci.yml ================================================ name: CI on: push: branches: [ main ] pull_request: branches: [ main ] jobs: test: runs-on: ubuntu-latest strategy: matrix: node-version: [18.x, 20.x] steps: - uses: actions/checkout@v3 - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v3 with: node-version: ${{ matrix.node-version }} cache: 'npm' - name: Install dependencies run: npm ci - name: Run lint run: npm run lint - name: Run type check run: npm run check - name: Run tests run: npm test - name: Build run: npm run build release: needs: test runs-on: ubuntu-latest if: github.ref == 'refs/heads/main' && github.event_name == 'push' steps: - uses: actions/checkout@v3 - name: Use Node.js 20.x uses: actions/setup-node@v3 with: node-version: 20.x cache: 'npm' registry-url: 'https://registry.npmjs.org' - name: Install dependencies run: npm ci - name: Build run: npm run build - name: Check if version changed id: version run: | CURRENT_VERSION=$(node -p "require('./package.json').version") echo "current_version=$CURRENT_VERSION" >> $GITHUB_OUTPUT # Check if this version exists on npm if npm view repomirror@$CURRENT_VERSION version 2>/dev/null; then echo "version_exists=true" >> $GITHUB_OUTPUT else echo 
"version_exists=false" >> $GITHUB_OUTPUT fi - name: Publish to npm if: steps.version.outputs.version_exists == 'false' run: npm publish env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} ================================================ FILE: .gitignore ================================================ dist/ claude_output.jsonl draft.md images/ *-draft.md ================================================ FILE: .repomirror/.gitignore ================================================ claude_output.jsonl ================================================ FILE: .repomirror/prompt.md ================================================ Your job is to port repomirror (TypeScript) to repomirror-py (Python) and maintain the repository. You have access to the current ./ repository as well as the target /tmp/test-target2 repository. Make a commit and push your changes after every single file edit. Use the /tmp/test-target2/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. 
================================================ FILE: .repomirror/ralph.sh ================================================ #!/bin/bash while :; do ./.repomirror/sync.sh echo -e "===SLEEP===\n===SLEEP===\n"; echo 'looping'; sleep 10; done ================================================ FILE: .repomirror/sync.sh ================================================ #!/bin/bash cat .repomirror/prompt.md | \ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir /tmp/test-target2 | \ tee -a .repomirror/claude_output.jsonl | \ npx repomirror visualize --debug; ================================================ FILE: @IMPLEMENTATION_PLAN.md ================================================ # RepoMirror Implementation Plan ## PRIORITY -1: finish rename ✅ - [x] change simonsays to repomirror everywhere, files, docs, names referenfces, everything ✅ - Updated package.json binary name from "simonsays" to "repomirror" - Renamed simonsays.yaml to repomirror.yaml - Updated CLI name and all help text references throughout src/cli.ts - Changed all ".simonsays" directory references to ".repomirror" - Renamed createSimonSaysFiles function to createRepoMirrorFiles - Updated all configuration file references from "simonsays.yaml" to "repomirror.yaml" - Updated all "npx simonsays" command references to "npx repomirror" - Migrated existing .simonsays directory to .repomirror with proper script updates - Updated all test files to use new naming conventions - Updated README.md and documentation - Removed orphaned test file for non-existent issue-fixer command - All 277 tests passing with TypeScript compilation successful ## PRIORITY 0: fix critical bugs ✅ - [x] sync-forever doesn't exit on ctrl+c ✅ - Fixed signal handling in sync-forever command for both legacy ralph.sh and new approach - Added proper subprocess management with SIGINT/SIGTERM handlers - [x] check other commands for ctrl+c handling as well ✅ - Added signal handling to sync command - Added 
signal handling to init command (Claude SDK query) - Added signal handling to issue-fixer command (Claude SDK query) - Added signal handling to pull command (for both sync and sync-forever) - Added signal handling to visualize command - All long-running commands now properly handle Ctrl+C ## Priority 1: Core Infrastructure ✅ - [x] Create implementation plan - [x] Initialize npm project structure - [x] Create basic CLI entry point ## Priority 2: Init Command ✅ - [x] Implement `npx repomirror init` command - [x] Generate transformation prompt using Claude SDK - [x] Perform preflight checks (git, claude, directories) - [x] Create .repomirror/ directory with scripts - [x] Ensure all preflight checks have verbose output - [x] Ensure all prompts/cli flags are stashed to a repomirror.yaml during setup, and defaults are populated from the yaml file if present (instead of core defaults) - [x] FIX CRITICAL BUG: `npx repomirror init` hangs in generateTransformationPrompt function - The Claude SDK async iterator needs proper handling to avoid infinite loops - Must break after receiving result (currently only breaks on non-error results) - Must handle ALL message types, not just "result" type - See updated spec.md for correct implementation pattern - [x] improve the cli init output to match the spec ✅ - Updated output format to match spec exactly (removed bullet points, added file list) - Fixed typo "repositorty" → "repository" in prompt examples - [x] update transformation prompt to match the spec ✅ ## Priority 3: Sync Commands ✅ - [x] Implement `sync` command to run sync.sh - [x] Implement `sync-one` command (alias for sync) - [x] Implement `sync-forever` command to run ralph.sh - [x] Implement `visualize` command for output formatting ## Priority 4: Advanced Features - [x] Add comprehensive tests for all commands ✅ - [x] Add remote repo support (push/pull) ✅ - Implemented `remote` command for managing remote repositories - Implemented `push` command with auto-commit and 
multi-remote support - Implemented `pull` command with source sync integration - Enhanced sync commands with auto-push capabilities - [x] Add validation script for init command ✅ - Created `hack/ralph-validate.sh` for automated testing - Added SKIP_CLAUDE_TEST environment variable for testing mode - [x] GitHub Actions integration ✅ - Implemented `github-actions` command for workflow generation - Creates customizable GitHub Actions workflow for automated syncing - Supports scheduled runs, manual triggers, and push-triggered syncs - Fixed linting error with escaped characters in workflow template - [x] Issue fixer functionality ✅ - Implemented `issue-fixer` command for automatic issue detection and fixing - Detects build, test, lint, and type checking issues across multiple languages - Supports Node/TypeScript, Python, and Go projects - Uses Claude SDK to intelligently fix detected issues - Interactive mode for selective issue fixing - Comprehensive test suite with 268 passing tests ## Priority 5: GitHub Actions PR Sync Features ✅ - [x] Implement `setup-github-pr-sync` command ✅ - Creates GitHub Actions workflow for PR-triggered syncing - Configurable loop iterations (1-10 times) for sync-one command - Persists settings to repomirror.yaml for future use - Overwrite protection with user confirmation - Creates `.github/workflows/repomirror.yml` workflow file - Comprehensive test coverage (8 tests) - [x] Implement `dispatch-sync` command ✅ - Dispatches workflow runs using GitHub CLI - Automatic repository detection from git remotes - User confirmation with `--yes` flag to skip - Quiet mode with `--quiet` flag (requires `--yes`) - Comprehensive error handling for missing prerequisites - Full test coverage (17 tests) ## Current Status ✅ **FULLY IMPLEMENTED** - All planned features completed successfully: - All CLI commands implemented and working - Init command creates proper .repomirror/ structure with **template-based shell script generation** - Sync commands execute 
shell scripts correctly - Visualize command provides colored output - Remote repository management (add/list/remove remotes) - Push command with intelligent commit messages and multi-remote support - Pull command with auto-sync integration - Auto-push capability after sync operations - Validation script for testing init command functionality - Test mode support with SKIP_CLAUDE_TEST environment variable - GitHub Actions workflow generation for CI/CD - Issue fixer command for automatic issue detection and resolution - GitHub Actions PR sync commands (`setup-github-pr-sync` and `dispatch-sync`) - **Shell script templates properly bundled in dist/ package** (matches spec requirement) - Comprehensive test suite with 293 tests passing (2 skipped for interactive mode) - TypeScript build passing with full type safety - All linting checks passing - **Ready for production usage with complete feature set** ## Recent Improvements ### Template-Based Shell Script Generation - IMPLEMENTED ✅ **Issue**: The spec required "The shell scripts are included in the npm dist/ bundle and baked into the package so they can be copied out of the package root by `npx repomirror init`" but the implementation was generating scripts inline. 
**Solution Implemented**: - Created template files in `src/templates/`: `sync.sh.template`, `ralph.sh.template`, `gitignore.template` - Updated build process in `package.json` to copy templates to `dist/templates/` - Modified `src/commands/init.ts` to read templates from package and substitute variables - Added template location resolution (tries `dist/templates` first, falls back to `src/templates`) - Supports `${targetRepo}` variable substitution in sync script template - All 293 tests continue to pass - Now fully compliant with spec requirement **Files Modified**: - `src/templates/sync.sh.template` (new) - `src/templates/ralph.sh.template` (new) - `src/templates/gitignore.template` (new) - `package.json` (updated build process and files array) - `src/commands/init.ts` (replaced inline generation with template-based) - `tests/commands/init.test.ts` (updated mocks for template handling) ## Known Issues & Critical Fixes Needed ### 1. Init Command Hangs (CRITICAL) - FIXED ✅ **Problem**: The `repomirror init` command was hanging forever during prompt generation. **Root Cause**: The `generateTransformationPrompt` function (src/commands/init.ts:256-318) was using an async iterator incorrectly, not handling error cases properly. 
**Fix Applied**: Updated the async iterator loop to properly handle both error and success cases: - Now throws an error immediately when Claude SDK returns an error - Properly breaks the loop after receiving ANY result type - Added more descriptive error messages for debugging **Testing Completed**: - All 230 tests passing (comprehensive test coverage added) - TypeScript build successful - Ready for production use ## Testing Instructions For testing the init command without calling Claude SDK: - Set `SKIP_CLAUDE_TEST=true` environment variable - This will skip the Claude Code preflight check and use a test prompt template - The validation script `hack/ralph-validate.sh` uses this flag for automated testing ================================================ FILE: README.md ================================================ # repomirror A tool to perform transformations on code repositories with AI. Inspired by [@ghuntley](https://github.com/ghuntley)'s [ralph wiggum](https://ghuntley.com/ralph). 
Built by [@yonom](https://github.com/yonom), [@dexhorthy](https://github.com/dexhorthy), [@lantos1618](https://github.com/lantos1618) and [@AVGVSTVS96](https://github.com/AVGVSTVS96) Read more about the project at [repomirror.md](./repomirror.md) ## Projects Some example projects maintained by repomirror: - [better-use](https://github.com/yonom/browser-use-ts) - A port of browser-use to typescript - [ai-sdk-python](https://github.com/yonom/ai-sdk-python) - Port of vercel AI sdk to python - [assistant-ui-vue](https://github.com/yonom/assistant-ui-vue) - Port of assistant-ui-react to vue.js - [open-dedalus](https://github.com/yonom/open-dedalus) - open source clone of dedalus using `llms-full.txt` - [better-ui](https://github.com/lantos1618/better-ui/tree/lantos-aui) - Assistant UI + TRPC (ai ui with frontend and backend State) - [lynlan](https://github.com/lantos1618/lynlang) - GO + RUST + Haskel ================================================ FILE: VALIDATION_PLAN.md ================================================ ✅ validate init: [COMPLETED] - create a temp dir for source repo and add a "hello.ts" file in it ✅ - create a temp dir for target and init a git repo in it ✅ - use `repomirror init` to test, use a "translate this repo to python" ✅ - ensure that the command succeeds and generated the correct files ✅ Validation completed successfully using hack/ralph-validate.sh script. 
Results: - repomirror.yaml created in source directory - .repomirror/ directory created with all required files (prompt.md, sync.sh, ralph.sh, .gitignore) - Command handles CLI flags correctly (--source, --target, --instructions) - Test mode support added with SKIP_CLAUDE_TEST environment variable for automated testing BUG - CTRL + C does not work in sync-forever command ================================================ FILE: coverage/base.css ================================================ body, html { margin:0; padding: 0; height: 100%; } body { font-family: Helvetica Neue, Helvetica, Arial; font-size: 14px; color:#333; } .small { font-size: 12px; } *, *:after, *:before { -webkit-box-sizing:border-box; -moz-box-sizing:border-box; box-sizing:border-box; } h1 { font-size: 20px; margin: 0;} h2 { font-size: 14px; } pre { font: 12px/1.4 Consolas, "Liberation Mono", Menlo, Courier, monospace; margin: 0; padding: 0; -moz-tab-size: 2; -o-tab-size: 2; tab-size: 2; } a { color:#0074D9; text-decoration:none; } a:hover { text-decoration:underline; } .strong { font-weight: bold; } .space-top1 { padding: 10px 0 0 0; } .pad2y { padding: 20px 0; } .pad1y { padding: 10px 0; } .pad2x { padding: 0 20px; } .pad2 { padding: 20px; } .pad1 { padding: 10px; } .space-left2 { padding-left:55px; } .space-right2 { padding-right:20px; } .center { text-align:center; } .clearfix { display:block; } .clearfix:after { content:''; display:block; height:0; clear:both; visibility:hidden; } .fl { float: left; } @media only screen and (max-width:640px) { .col3 { width:100%; max-width:100%; } .hide-mobile { display:none!important; } } .quiet { color: #7f7f7f; color: rgba(0,0,0,0.5); } .quiet a { opacity: 0.7; } .fraction { font-family: Consolas, 'Liberation Mono', Menlo, Courier, monospace; font-size: 10px; color: #555; background: #E8E8E8; padding: 4px 5px; border-radius: 3px; vertical-align: middle; } div.path a:link, div.path a:visited { color: #333; } table.coverage { border-collapse: collapse; 
margin: 10px 0 0 0; padding: 0; } table.coverage td { margin: 0; padding: 0; vertical-align: top; } table.coverage td.line-count { text-align: right; padding: 0 5px 0 20px; } table.coverage td.line-coverage { text-align: right; padding-right: 10px; min-width:20px; } table.coverage td span.cline-any { display: inline-block; padding: 0 5px; width: 100%; } .missing-if-branch { display: inline-block; margin-right: 5px; border-radius: 3px; position: relative; padding: 0 4px; background: #333; color: yellow; } .skip-if-branch { display: none; margin-right: 10px; position: relative; padding: 0 4px; background: #ccc; color: white; } .missing-if-branch .typ, .skip-if-branch .typ { color: inherit !important; } .coverage-summary { border-collapse: collapse; width: 100%; } .coverage-summary tr { border-bottom: 1px solid #bbb; } .keyline-all { border: 1px solid #ddd; } .coverage-summary td, .coverage-summary th { padding: 10px; } .coverage-summary tbody { border: 1px solid #bbb; } .coverage-summary td { border-right: 1px solid #bbb; } .coverage-summary td:last-child { border-right: none; } .coverage-summary th { text-align: left; font-weight: normal; white-space: nowrap; } .coverage-summary th.file { border-right: none !important; } .coverage-summary th.pct { } .coverage-summary th.pic, .coverage-summary th.abs, .coverage-summary td.pct, .coverage-summary td.abs { text-align: right; } .coverage-summary td.file { white-space: nowrap; } .coverage-summary td.pic { min-width: 120px !important; } .coverage-summary tfoot td { } .coverage-summary .sorter { height: 10px; width: 7px; display: inline-block; margin-left: 0.5em; background: url(sort-arrow-sprite.png) no-repeat scroll 0 0 transparent; } .coverage-summary .sorted .sorter { background-position: 0 -20px; } .coverage-summary .sorted-desc .sorter { background-position: 0 -10px; } .status-line { height: 10px; } /* yellow */ .cbranch-no { background: yellow !important; color: #111; } /* dark red */ .red.solid, .status-line.low, 
.low .cover-fill { background:#C21F39 } .low .chart { border:1px solid #C21F39 } .highlighted, .highlighted .cstat-no, .highlighted .fstat-no, .highlighted .cbranch-no{ background: #C21F39 !important; } /* medium red */ .cstat-no, .fstat-no, .cbranch-no, .cbranch-no { background:#F6C6CE } /* light red */ .low, .cline-no { background:#FCE1E5 } /* light green */ .high, .cline-yes { background:rgb(230,245,208) } /* medium green */ .cstat-yes { background:rgb(161,215,106) } /* dark green */ .status-line.high, .high .cover-fill { background:rgb(77,146,33) } .high .chart { border:1px solid rgb(77,146,33) } /* dark yellow (gold) */ .status-line.medium, .medium .cover-fill { background: #f9cd0b; } .medium .chart { border:1px solid #f9cd0b; } /* light yellow */ .medium { background: #fff4c2; } .cstat-skip { background: #ddd; color: #111; } .fstat-skip { background: #ddd; color: #111 !important; } .cbranch-skip { background: #ddd !important; color: #111; } span.cline-neutral { background: #eaeaea; } .coverage-summary td.empty { opacity: .5; padding-top: 4px; padding-bottom: 4px; line-height: 1; color: #888; } .cover-fill, .cover-empty { display:inline-block; height: 12px; } .chart { line-height: 0; } .cover-empty { background: white; } .cover-full { border-right: none !important; } pre.prettyprint { border: none !important; padding: 0 !important; margin: 0 !important; } .com { color: #999 !important; } .ignore-none { color: #999; font-weight: normal; } .wrapper { min-height: 100%; height: auto !important; height: 100%; margin: 0 auto -48px; } .footer, .push { height: 48px; } ================================================ FILE: coverage/block-navigation.js ================================================ /* eslint-disable */ var jumpToCode = (function init() { // Classes of code we would like to highlight in the file view var missingCoverageClasses = ['.cbranch-no', '.cstat-no', '.fstat-no']; // Elements to highlight in the file listing view var fileListingElements = 
['td.pct.low']; // We don't want to select elements that are direct descendants of another match var notSelector = ':not(' + missingCoverageClasses.join('):not(') + ') > '; // becomes `:not(a):not(b) > ` // Selector that finds elements on the page to which we can jump var selector = fileListingElements.join(', ') + ', ' + notSelector + missingCoverageClasses.join(', ' + notSelector); // becomes `:not(a):not(b) > a, :not(a):not(b) > b` // The NodeList of matching elements var missingCoverageElements = document.querySelectorAll(selector); var currentIndex; function toggleClass(index) { missingCoverageElements .item(currentIndex) .classList.remove('highlighted'); missingCoverageElements.item(index).classList.add('highlighted'); } function makeCurrent(index) { toggleClass(index); currentIndex = index; missingCoverageElements.item(index).scrollIntoView({ behavior: 'smooth', block: 'center', inline: 'center' }); } function goToPrevious() { var nextIndex = 0; if (typeof currentIndex !== 'number' || currentIndex === 0) { nextIndex = missingCoverageElements.length - 1; } else if (missingCoverageElements.length > 1) { nextIndex = currentIndex - 1; } makeCurrent(nextIndex); } function goToNext() { var nextIndex = 0; if ( typeof currentIndex === 'number' && currentIndex < missingCoverageElements.length - 1 ) { nextIndex = currentIndex + 1; } makeCurrent(nextIndex); } return function jump(event) { if ( document.getElementById('fileSearch') === document.activeElement && document.activeElement != null ) { // if we're currently focused on the search input, we don't want to navigate return; } switch (event.which) { case 78: // n case 74: // j goToNext(); break; case 66: // b case 75: // k case 80: // p goToPrevious(); break; } }; })(); window.addEventListener('keydown', jumpToCode); ================================================ FILE: coverage/coverage-final.json ================================================ {"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/cli.ts": 
{"path":"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/cli.ts","all":true,"statementMap":{"0":{"start":{"line":1,"column":0},"end":{"line":1,"column":19}},"1":{"start":{"line":2,"column":0},"end":{"line":2,"column":36}},"2":{"start":{"line":3,"column":0},"end":{"line":3,"column":39}},"3":{"start":{"line":4,"column":0},"end":{"line":4,"column":46}},"4":{"start":{"line":5,"column":0},"end":{"line":5,"column":39}},"5":{"start":{"line":6,"column":0},"end":{"line":6,"column":54}},"6":{"start":{"line":7,"column":0},"end":{"line":7,"column":49}},"7":{"start":{"line":8,"column":0},"end":{"line":8,"column":0}},"8":{"start":{"line":9,"column":0},"end":{"line":9,"column":30}},"9":{"start":{"line":10,"column":0},"end":{"line":10,"column":0}},"10":{"start":{"line":11,"column":0},"end":{"line":11,"column":7}},"11":{"start":{"line":12,"column":0},"end":{"line":12,"column":21}},"12":{"start":{"line":13,"column":0},"end":{"line":13,"column":65}},"13":{"start":{"line":14,"column":0},"end":{"line":14,"column":20}},"14":{"start":{"line":15,"column":0},"end":{"line":15,"column":0}},"15":{"start":{"line":16,"column":0},"end":{"line":16,"column":7}},"16":{"start":{"line":17,"column":0},"end":{"line":17,"column":18}},"17":{"start":{"line":18,"column":0},"end":{"line":18,"column":60}},"18":{"start":{"line":19,"column":0},"end":{"line":19,"column":16}},"19":{"start":{"line":20,"column":0},"end":{"line":20,"column":0}},"20":{"start":{"line":21,"column":0},"end":{"line":21,"column":75}},"21":{"start":{"line":22,"column":0},"end":{"line":22,"column":0}},"22":{"start":{"line":23,"column":0},"end":{"line":23,"column":7}},"23":{"start":{"line":24,"column":0},"end":{"line":24,"column":22}},"24":{"start":{"line":25,"column":0},"end":{"line":25,"column":57}},"25":{"start":{"line":26,"column":0},"end":{"line":26,"column":19}},"26":{"start":{"line":27,"column":0},"end":{"line":27,"column":0}},"27":{"start":{"line":28,"column":0},"end":{"line":28,"column":7}},"28":{"start":{"line":29,"column":0}
,"end":{"line":29,"column":26}},"29":{"start":{"line":30,"column":0},"end":{"line":30,"column":39}},"30":{"start":{"line":31,"column":0},"end":{"line":31,"column":23}},"31":{"start":{"line":32,"column":0},"end":{"line":32,"column":0}},"32":{"start":{"line":33,"column":0},"end":{"line":33,"column":7}},"33":{"start":{"line":34,"column":0},"end":{"line":34,"column":23}},"34":{"start":{"line":35,"column":0},"end":{"line":35,"column":48}},"35":{"start":{"line":36,"column":0},"end":{"line":36,"column":45}},"36":{"start":{"line":37,"column":0},"end":{"line":37,"column":43}},"37":{"start":{"line":38,"column":0},"end":{"line":38,"column":0}},"38":{"start":{"line":39,"column":0},"end":{"line":39,"column":16}}},"s":{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0},"branchMap":{"0":{"type":"branch","line":1,"loc":{"start":{"line":1,"column":0},"end":{"line":39,"column":-423}},"locations":[{"start":{"line":1,"column":0},"end":{"line":39,"column":-423}}]}},"b":{"0":[0]},"fnMap":{"0":{"name":"(empty-report)","decl":{"start":{"line":1,"column":0},"end":{"line":39,"column":-423}},"loc":{"start":{"line":1,"column":0},"end":{"line":39,"column":-423}},"line":1}},"f":{"0":0}} ,"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/init.ts": 
{"path":"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/init.ts","all":true,"statementMap":{"0":{"start":{"line":1,"column":0},"end":{"line":1,"column":36}},"1":{"start":{"line":2,"column":0},"end":{"line":2,"column":38}},"2":{"start":{"line":3,"column":0},"end":{"line":3,"column":32}},"3":{"start":{"line":4,"column":0},"end":{"line":4,"column":26}},"4":{"start":{"line":5,"column":0},"end":{"line":5,"column":22}},"5":{"start":{"line":6,"column":0},"end":{"line":6,"column":50}},"6":{"start":{"line":7,"column":0},"end":{"line":7,"column":30}},"7":{"start":{"line":8,"column":0},"end":{"line":8,"column":0}},"8":{"start":{"line":9,"column":0},"end":{"line":9,"column":23}},"9":{"start":{"line":10,"column":0},"end":{"line":10,"column":21}},"10":{"start":{"line":11,"column":0},"end":{"line":11,"column":21}},"11":{"start":{"line":12,"column":0},"end":{"line":12,"column":37}},"12":{"start":{"line":13,"column":0},"end":{"line":13,"column":1}},"13":{"start":{"line":14,"column":0},"end":{"line":14,"column":0}},"14":{"start":{"line":15,"column":0},"end":{"line":15,"column":45}},"15":{"start":{"line":16,"column":0},"end":{"line":16,"column":14}},"16":{"start":{"line":17,"column":0},"end":{"line":17,"column":76}},"17":{"start":{"line":18,"column":0},"end":{"line":18,"column":4}},"18":{"start":{"line":19,"column":0},"end":{"line":19,"column":0}},"19":{"start":{"line":20,"column":0},"end":{"line":20,"column":50}},"20":{"start":{"line":21,"column":0},"end":{"line":21,"column":35}},"21":{"start":{"line":22,"column":0},"end":{"line":22,"column":40}},"22":{"start":{"line":23,"column":0},"end":{"line":23,"column":53}},"23":{"start":{"line":24,"column":0},"end":{"line":24,"column":0}},"24":{"start":{"line":25,"column":0},"end":{"line":25,"column":54}},"25":{"start":{"line":26,"column":0},"end":{"line":26,"column":5}},"26":{"start":{"line":27,"column":0},"end":{"line":27,"column":20}},"27":{"start":{"line":28,"column":0},"end":{"line":28,"column":25}},"28":{"start":{"line":2
9,"column":0},"end":{"line":29,"column":52}},"29":{"start":{"line":30,"column":0},"end":{"line":30,"column":20}},"30":{"start":{"line":31,"column":0},"end":{"line":31,"column":6}},"31":{"start":{"line":32,"column":0},"end":{"line":32,"column":5}},"32":{"start":{"line":33,"column":0},"end":{"line":33,"column":20}},"33":{"start":{"line":34,"column":0},"end":{"line":34,"column":25}},"34":{"start":{"line":35,"column":0},"end":{"line":35,"column":57}},"35":{"start":{"line":36,"column":0},"end":{"line":36,"column":29}},"36":{"start":{"line":37,"column":0},"end":{"line":37,"column":6}},"37":{"start":{"line":38,"column":0},"end":{"line":38,"column":5}},"38":{"start":{"line":39,"column":0},"end":{"line":39,"column":20}},"39":{"start":{"line":40,"column":0},"end":{"line":40,"column":41}},"40":{"start":{"line":41,"column":0},"end":{"line":41,"column":51}},"41":{"start":{"line":42,"column":0},"end":{"line":42,"column":58}},"42":{"start":{"line":43,"column":0},"end":{"line":43,"column":6}},"43":{"start":{"line":44,"column":0},"end":{"line":44,"column":5}},"44":{"start":{"line":45,"column":0},"end":{"line":45,"column":0}},"45":{"start":{"line":46,"column":0},"end":{"line":46,"column":29}},"46":{"start":{"line":47,"column":0},"end":{"line":47,"column":51}},"47":{"start":{"line":48,"column":0},"end":{"line":48,"column":0}},"48":{"start":{"line":49,"column":0},"end":{"line":49,"column":52}},"49":{"start":{"line":50,"column":0},"end":{"line":50,"column":69}},"50":{"start":{"line":51,"column":0},"end":{"line":51,"column":0}},"51":{"start":{"line":52,"column":0},"end":{"line":52,"column":7}},"52":{"start":{"line":53,"column":0},"end":{"line":53,"column":63}},"53":{"start":{"line":54,"column":0},"end":{"line":54,"column":25}},"54":{"start":{"line":55,"column":0},"end":{"line":55,"column":25}},"55":{"start":{"line":56,"column":0},"end":{"line":56,"column":41}},"56":{"start":{"line":57,"column":0},"end":{"line":57,"column":6}},"57":{"start":{"line":58,"column":0},"end":{"line":58,"column"
:0}},"58":{"start":{"line":59,"column":0},"end":{"line":59,"column":55}},"59":{"start":{"line":60,"column":0},"end":{"line":60,"column":0}},"60":{"start":{"line":61,"column":0},"end":{"line":61,"column":45}},"61":{"start":{"line":62,"column":0},"end":{"line":62,"column":32}},"62":{"start":{"line":63,"column":0},"end":{"line":63,"column":25}},"63":{"start":{"line":64,"column":0},"end":{"line":64,"column":25}},"64":{"start":{"line":65,"column":0},"end":{"line":65,"column":22}},"65":{"start":{"line":66,"column":0},"end":{"line":66,"column":6}},"66":{"start":{"line":67,"column":0},"end":{"line":67,"column":0}},"67":{"start":{"line":68,"column":0},"end":{"line":68,"column":73}},"68":{"start":{"line":69,"column":0},"end":{"line":69,"column":45}},"69":{"start":{"line":70,"column":0},"end":{"line":70,"column":16}},"70":{"start":{"line":71,"column":0},"end":{"line":71,"column":18}},"71":{"start":{"line":72,"column":0},"end":{"line":72,"column":78}},"72":{"start":{"line":73,"column":0},"end":{"line":73,"column":8}},"73":{"start":{"line":74,"column":0},"end":{"line":74,"column":6}},"74":{"start":{"line":75,"column":0},"end":{"line":75,"column":16}},"75":{"start":{"line":76,"column":0},"end":{"line":76,"column":18}},"76":{"start":{"line":77,"column":0},"end":{"line":77,"column":128}},"77":{"start":{"line":78,"column":0},"end":{"line":78,"column":8}},"78":{"start":{"line":79,"column":0},"end":{"line":79,"column":6}},"79":{"start":{"line":80,"column":0},"end":{"line":80,"column":19}},"80":{"start":{"line":81,"column":0},"end":{"line":81,"column":61}},"81":{"start":{"line":82,"column":0},"end":{"line":82,"column":18}},"82":{"start":{"line":83,"column":0},"end":{"line":83,"column":16}},"83":{"start":{"line":84,"column":0},"end":{"line":84,"column":75}},"84":{"start":{"line":85,"column":0},"end":{"line":85,"column":8}},"85":{"start":{"line":86,"column":0},"end":{"line":86,"column":6}},"86":{"start":{"line":87,"column":0},"end":{"line":87,"column":20}},"87":{"start":{"line":88,"colum
n":0},"end":{"line":88,"column":3}},"88":{"start":{"line":89,"column":0},"end":{"line":89,"column":1}},"89":{"start":{"line":90,"column":0},"end":{"line":90,"column":0}},"90":{"start":{"line":91,"column":0},"end":{"line":91,"column":74}},"91":{"start":{"line":92,"column":0},"end":{"line":92,"column":64}},"92":{"start":{"line":93,"column":0},"end":{"line":93,"column":0}},"93":{"start":{"line":94,"column":0},"end":{"line":94,"column":7}},"94":{"start":{"line":95,"column":0},"end":{"line":95,"column":39}},"95":{"start":{"line":96,"column":0},"end":{"line":96,"column":9}},"96":{"start":{"line":97,"column":0},"end":{"line":97,"column":34}},"97":{"start":{"line":98,"column":0},"end":{"line":98,"column":13}},"98":{"start":{"line":99,"column":0},"end":{"line":99,"column":68}},"99":{"start":{"line":100,"column":0},"end":{"line":100,"column":22}},"100":{"start":{"line":101,"column":0},"end":{"line":101,"column":5}},"101":{"start":{"line":102,"column":0},"end":{"line":102,"column":0}},"102":{"start":{"line":103,"column":0},"end":{"line":103,"column":46}},"103":{"start":{"line":104,"column":0},"end":{"line":104,"column":9}},"104":{"start":{"line":105,"column":0},"end":{"line":105,"column":74}},"105":{"start":{"line":106,"column":0},"end":{"line":106,"column":13}},"106":{"start":{"line":107,"column":0},"end":{"line":107,"column":77}},"107":{"start":{"line":108,"column":0},"end":{"line":108,"column":22}},"108":{"start":{"line":109,"column":0},"end":{"line":109,"column":5}},"109":{"start":{"line":110,"column":0},"end":{"line":110,"column":0}},"110":{"start":{"line":111,"column":0},"end":{"line":111,"column":58}},"111":{"start":{"line":112,"column":0},"end":{"line":112,"column":9}},"112":{"start":{"line":113,"column":0},"end":{"line":113,"column":63}},"113":{"start":{"line":114,"column":0},"end":{"line":114,"column":24}},"114":{"start":{"line":115,"column":0},"end":{"line":115,"column":9}},"115":{"start":{"line":116,"column":0},"end":{"line":116,"column":27}},"116":{"start":{"line"
:117,"column":0},"end":{"line":117,"column":21}},"117":{"start":{"line":118,"column":0},"end":{"line":118,"column":73}},"118":{"start":{"line":119,"column":0},"end":{"line":119,"column":10}},"119":{"start":{"line":120,"column":0},"end":{"line":120,"column":24}},"120":{"start":{"line":121,"column":0},"end":{"line":121,"column":7}},"121":{"start":{"line":122,"column":0},"end":{"line":122,"column":13}},"122":{"start":{"line":123,"column":0},"end":{"line":123,"column":67}},"123":{"start":{"line":124,"column":0},"end":{"line":124,"column":22}},"124":{"start":{"line":125,"column":0},"end":{"line":125,"column":5}},"125":{"start":{"line":126,"column":0},"end":{"line":126,"column":0}},"126":{"start":{"line":127,"column":0},"end":{"line":127,"column":41}},"127":{"start":{"line":128,"column":0},"end":{"line":128,"column":9}},"128":{"start":{"line":129,"column":0},"end":{"line":129,"column":65}},"129":{"start":{"line":130,"column":0},"end":{"line":130,"column":49}},"130":{"start":{"line":131,"column":0},"end":{"line":131,"column":80}},"131":{"start":{"line":132,"column":0},"end":{"line":132,"column":24}},"132":{"start":{"line":133,"column":0},"end":{"line":133,"column":7}},"133":{"start":{"line":134,"column":0},"end":{"line":134,"column":13}},"134":{"start":{"line":135,"column":0},"end":{"line":135,"column":19}},"135":{"start":{"line":136,"column":0},"end":{"line":136,"column":93}},"136":{"start":{"line":137,"column":0},"end":{"line":137,"column":8}},"137":{"start":{"line":138,"column":0},"end":{"line":138,"column":22}},"138":{"start":{"line":139,"column":0},"end":{"line":139,"column":5}},"139":{"start":{"line":140,"column":0},"end":{"line":140,"column":0}},"140":{"start":{"line":141,"column":0},"end":{"line":141,"column":51}},"141":{"start":{"line":142,"column":0},"end":{"line":142,"column":19}},"142":{"start":{"line":143,"column":0},"end":{"line":143,"column":17}},"143":{"start":{"line":144,"column":0},"end":{"line":144,"column":90}},"144":{"start":{"line":145,"column":0},"en
d":{"line":145,"column":6}},"145":{"start":{"line":146,"column":0},"end":{"line":146,"column":20}},"146":{"start":{"line":147,"column":0},"end":{"line":147,"column":3}},"147":{"start":{"line":148,"column":0},"end":{"line":148,"column":1}},"148":{"start":{"line":149,"column":0},"end":{"line":149,"column":0}},"149":{"start":{"line":150,"column":0},"end":{"line":150,"column":44}},"150":{"start":{"line":151,"column":0},"end":{"line":151,"column":21}},"151":{"start":{"line":152,"column":0},"end":{"line":152,"column":21}},"152":{"start":{"line":153,"column":0},"end":{"line":153,"column":37}},"153":{"start":{"line":154,"column":0},"end":{"line":154,"column":20}},"154":{"start":{"line":155,"column":0},"end":{"line":155,"column":149}},"155":{"start":{"line":156,"column":0},"end":{"line":156,"column":0}},"156":{"start":{"line":157,"column":0},"end":{"line":157,"column":11}},"157":{"start":{"line":158,"column":0},"end":{"line":158,"column":110}},"158":{"start":{"line":159,"column":0},"end":{"line":159,"column":0}},"159":{"start":{"line":160,"column":0},"end":{"line":160,"column":97}},"160":{"start":{"line":161,"column":0},"end":{"line":161,"column":0}},"161":{"start":{"line":162,"column":0},"end":{"line":162,"column":65}},"162":{"start":{"line":163,"column":0},"end":{"line":163,"column":0}},"163":{"start":{"line":164,"column":0},"end":{"line":164,"column":113}},"164":{"start":{"line":165,"column":0},"end":{"line":165,"column":0}},"165":{"start":{"line":166,"column":0},"end":{"line":166,"column":325}},"166":{"start":{"line":167,"column":0},"end":{"line":167,"column":12}},"167":{"start":{"line":168,"column":0},"end":{"line":168,"column":0}},"168":{"start":{"line":169,"column":0},"end":{"line":169,"column":11}},"169":{"start":{"line":170,"column":0},"end":{"line":170,"column":109}},"170":{"start":{"line":171,"column":0},"end":{"line":171,"column":0}},"171":{"start":{"line":172,"column":0},"end":{"line":172,"column":104}},"172":{"start":{"line":173,"column":0},"end":{"line":173,"c
olumn":0}},"173":{"start":{"line":174,"column":0},"end":{"line":174,"column":65}},"174":{"start":{"line":175,"column":0},"end":{"line":175,"column":0}},"175":{"start":{"line":176,"column":0},"end":{"line":176,"column":113}},"176":{"start":{"line":177,"column":0},"end":{"line":177,"column":0}},"177":{"start":{"line":178,"column":0},"end":{"line":178,"column":325}},"178":{"start":{"line":179,"column":0},"end":{"line":179,"column":12}},"179":{"start":{"line":180,"column":0},"end":{"line":180,"column":0}},"180":{"start":{"line":181,"column":0},"end":{"line":181,"column":46}},"181":{"start":{"line":182,"column":0},"end":{"line":182,"column":0}},"182":{"start":{"line":183,"column":0},"end":{"line":183,"column":19}},"183":{"start":{"line":184,"column":0},"end":{"line":184,"column":29}},"184":{"start":{"line":185,"column":0},"end":{"line":185,"column":20}},"185":{"start":{"line":186,"column":0},"end":{"line":186,"column":0}},"186":{"start":{"line":187,"column":0},"end":{"line":187,"column":9}},"187":{"start":{"line":188,"column":0},"end":{"line":188,"column":0}},"188":{"start":{"line":189,"column":0},"end":{"line":189,"column":138}},"189":{"start":{"line":190,"column":0},"end":{"line":190,"column":0}},"190":{"start":{"line":191,"column":0},"end":{"line":191,"column":316}},"191":{"start":{"line":192,"column":0},"end":{"line":192,"column":0}},"192":{"start":{"line":193,"column":0},"end":{"line":193,"column":18}},"193":{"start":{"line":194,"column":0},"end":{"line":194,"column":37}},"194":{"start":{"line":195,"column":0},"end":{"line":195,"column":23}},"195":{"start":{"line":196,"column":0},"end":{"line":196,"column":7}},"196":{"start":{"line":197,"column":0},"end":{"line":197,"column":57}},"197":{"start":{"line":198,"column":0},"end":{"line":198,"column":45}},"198":{"start":{"line":199,"column":0},"end":{"line":199,"column":12}},"199":{"start":{"line":200,"column":0},"end":{"line":200,"column":5}},"200":{"start":{"line":201,"column":0},"end":{"line":201,"column":3}},"201":{"s
tart":{"line":202,"column":0},"end":{"line":202,"column":0}},"202":{"start":{"line":203,"column":0},"end":{"line":203,"column":16}},"203":{"start":{"line":204,"column":0},"end":{"line":204,"column":64}},"204":{"start":{"line":205,"column":0},"end":{"line":205,"column":3}},"205":{"start":{"line":206,"column":0},"end":{"line":206,"column":0}},"206":{"start":{"line":207,"column":0},"end":{"line":207,"column":43}},"207":{"start":{"line":208,"column":0},"end":{"line":208,"column":15}},"208":{"start":{"line":209,"column":0},"end":{"line":209,"column":44}},"209":{"start":{"line":210,"column":0},"end":{"line":210,"column":44}},"210":{"start":{"line":211,"column":0},"end":{"line":211,"column":45}},"211":{"start":{"line":212,"column":0},"end":{"line":212,"column":1}},"212":{"start":{"line":213,"column":0},"end":{"line":213,"column":0}},"213":{"start":{"line":214,"column":0},"end":{"line":214,"column":37}},"214":{"start":{"line":215,"column":0},"end":{"line":215,"column":21}},"215":{"start":{"line":216,"column":0},"end":{"line":216,"column":21}},"216":{"start":{"line":217,"column":0},"end":{"line":217,"column":26}},"217":{"start":{"line":218,"column":0},"end":{"line":218,"column":18}},"218":{"start":{"line":219,"column":0},"end":{"line":219,"column":59}},"219":{"start":{"line":220,"column":0},"end":{"line":220,"column":0}},"220":{"start":{"line":221,"column":0},"end":{"line":221,"column":33}},"221":{"start":{"line":222,"column":0},"end":{"line":222,"column":53}},"222":{"start":{"line":223,"column":0},"end":{"line":223,"column":0}},"223":{"start":{"line":224,"column":0},"end":{"line":224,"column":21}},"224":{"start":{"line":225,"column":0},"end":{"line":225,"column":72}},"225":{"start":{"line":226,"column":0},"end":{"line":226,"column":0}},"226":{"start":{"line":227,"column":0},"end":{"line":227,"column":19}},"227":{"start":{"line":228,"column":0},"end":{"line":228,"column":33}},"228":{"start":{"line":229,"column":0},"end":{"line":229,"column":30}},"229":{"start":{"line":230,"c
olumn":0},"end":{"line":230,"column":115}},"230":{"start":{"line":231,"column":0},"end":{"line":231,"column":51}},"231":{"start":{"line":232,"column":0},"end":{"line":232,"column":43}},"232":{"start":{"line":233,"column":0},"end":{"line":233,"column":0}},"233":{"start":{"line":234,"column":0},"end":{"line":234,"column":66}},"234":{"start":{"line":235,"column":0},"end":{"line":235,"column":16}},"235":{"start":{"line":236,"column":0},"end":{"line":236,"column":5}},"236":{"start":{"line":237,"column":0},"end":{"line":237,"column":0}},"237":{"start":{"line":238,"column":0},"end":{"line":238,"column":20}},"238":{"start":{"line":239,"column":0},"end":{"line":239,"column":34}},"239":{"start":{"line":240,"column":0},"end":{"line":240,"column":11}},"240":{"start":{"line":241,"column":0},"end":{"line":241,"column":23}},"241":{"start":{"line":242,"column":0},"end":{"line":242,"column":57}},"242":{"start":{"line":243,"column":0},"end":{"line":243,"column":11}},"243":{"start":{"line":244,"column":0},"end":{"line":244,"column":6}},"244":{"start":{"line":245,"column":0},"end":{"line":245,"column":0}},"245":{"start":{"line":246,"column":0},"end":{"line":246,"column":68}},"246":{"start":{"line":247,"column":0},"end":{"line":247,"column":16}},"247":{"start":{"line":248,"column":0},"end":{"line":248,"column":5}},"248":{"start":{"line":249,"column":0},"end":{"line":249,"column":0}},"249":{"start":{"line":250,"column":0},"end":{"line":250,"column":22}},"250":{"start":{"line":251,"column":0},"end":{"line":251,"column":21}},"251":{"start":{"line":252,"column":0},"end":{"line":252,"column":38}},"252":{"start":{"line":253,"column":0},"end":{"line":253,"column":28}},"253":{"start":{"line":254,"column":0},"end":{"line":254,"column":4}},"254":{"start":{"line":255,"column":0},"end":{"line":255,"column":1}}},"s":{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0
,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"101":0,"102":0,"103":0,"104":0,"105":0,"106":0,"107":0,"108":0,"109":0,"110":0,"111":0,"112":0,"113":0,"114":0,"115":0,"116":0,"117":0,"118":0,"119":0,"120":0,"121":0,"122":0,"123":0,"124":0,"125":0,"126":0,"127":0,"128":0,"129":0,"130":0,"131":0,"132":0,"133":0,"134":0,"135":0,"136":0,"137":0,"138":0,"139":0,"140":0,"141":0,"142":0,"143":0,"144":0,"145":0,"146":0,"147":0,"148":0,"149":0,"150":0,"151":0,"152":0,"153":0,"154":0,"155":0,"156":0,"157":0,"158":0,"159":0,"160":0,"161":0,"162":0,"163":0,"164":0,"165":0,"166":0,"167":0,"168":0,"169":0,"170":0,"171":0,"172":0,"173":0,"174":0,"175":0,"176":0,"177":0,"178":0,"179":0,"180":0,"181":0,"182":0,"183":0,"184":0,"185":0,"186":0,"187":0,"188":0,"189":0,"190":0,"191":0,"192":0,"193":0,"194":0,"195":0,"196":0,"197":0,"198":0,"199":0,"200":0,"201":0,"202":0,"203":0,"204":0,"205":0,"206":0,"207":0,"208":0,"209":0,"210":0,"211":0,"212":0,"213":0,"214":0,"215":0,"216":0,"217":0,"218":0,"219":0,"220":0,"221":0,"222":0,"223":0,"224":0,"225":0,"226":0,"227":0,"228":0,"229":0,"230":0,"231":0,"232":0,"233":0,"234":0,"235":0,"236":0,"237":0,"238":0,"239":0,"240":0,"241":0,"242":0,"243":0,"244":0,"245":0,"246":0,"247":0,"248":0,"249":0,"250":0,"251":0,"252":0,"253":0,"254":0},"branchMap":{"0":{"type":"branch","line":1,"loc":{"start":{"line":1,"column":0},"end":{"line":255,"column":-326}},"locations":[{"start":{"line":1,"column":0},"end":{"line":255,"column":-326}}]}},"b":{"0":[0]},"fnMap":{"0":{"name":"(empty-report)","decl":
{"start":{"line":1,"column":0},"end":{"line":255,"column":-326}},"loc":{"start":{"line":1,"column":0},"end":{"line":255,"column":-326}},"line":1}},"f":{"0":0}} ,"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/sync-forever.ts": {"path":"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/sync-forever.ts","all":true,"statementMap":{"0":{"start":{"line":1,"column":0},"end":{"line":1,"column":30}},"1":{"start":{"line":2,"column":0},"end":{"line":2,"column":26}},"2":{"start":{"line":3,"column":0},"end":{"line":3,"column":28}},"3":{"start":{"line":4,"column":0},"end":{"line":4,"column":36}},"4":{"start":{"line":5,"column":0},"end":{"line":5,"column":0}},"5":{"start":{"line":6,"column":0},"end":{"line":6,"column":52}},"6":{"start":{"line":7,"column":0},"end":{"line":7,"column":69}},"7":{"start":{"line":8,"column":0},"end":{"line":8,"column":0}},"8":{"start":{"line":9,"column":0},"end":{"line":9,"column":7}},"9":{"start":{"line":10,"column":0},"end":{"line":10,"column":31}},"10":{"start":{"line":11,"column":0},"end":{"line":11,"column":33}},"11":{"start":{"line":12,"column":0},"end":{"line":12,"column":11}},"12":{"start":{"line":13,"column":0},"end":{"line":13,"column":18}},"13":{"start":{"line":14,"column":0},"end":{"line":14,"column":16}},"14":{"start":{"line":15,"column":0},"end":{"line":15,"column":82}},"15":{"start":{"line":16,"column":0},"end":{"line":16,"column":8}},"16":{"start":{"line":17,"column":0},"end":{"line":17,"column":6}},"17":{"start":{"line":18,"column":0},"end":{"line":18,"column":20}},"18":{"start":{"line":19,"column":0},"end":{"line":19,"column":3}},"19":{"start":{"line":20,"column":0},"end":{"line":20,"column":0}},"20":{"start":{"line":21,"column":0},"end":{"line":21,"column":67}},"21":{"start":{"line":22,"column":0},"end":{"line":22,"column":52}},"22":{"start":{"line":23,"column":0},"end":{"line":23,"column":0}},"23":{"start":{"line":24,"column":0},"end":{"line":24,"column":7}},"24":{"start":{"line":25,"column":0},"end":{"l
ine":25,"column":40}},"25":{"start":{"line":26,"column":0},"end":{"line":26,"column":23}},"26":{"start":{"line":27,"column":0},"end":{"line":27,"column":25}},"27":{"start":{"line":28,"column":0},"end":{"line":28,"column":7}},"28":{"start":{"line":29,"column":0},"end":{"line":29,"column":19}},"29":{"start":{"line":30,"column":0},"end":{"line":30,"column":71}},"30":{"start":{"line":31,"column":0},"end":{"line":31,"column":53}},"31":{"start":{"line":32,"column":0},"end":{"line":32,"column":12}},"32":{"start":{"line":33,"column":0},"end":{"line":33,"column":20}},"33":{"start":{"line":34,"column":0},"end":{"line":34,"column":18}},"34":{"start":{"line":35,"column":0},"end":{"line":35,"column":91}},"35":{"start":{"line":36,"column":0},"end":{"line":36,"column":10}},"36":{"start":{"line":37,"column":0},"end":{"line":37,"column":8}},"37":{"start":{"line":38,"column":0},"end":{"line":38,"column":22}},"38":{"start":{"line":39,"column":0},"end":{"line":39,"column":5}},"39":{"start":{"line":40,"column":0},"end":{"line":40,"column":3}},"40":{"start":{"line":41,"column":0},"end":{"line":41,"column":1}}},"s":{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0},"branchMap":{"0":{"type":"branch","line":1,"loc":{"start":{"line":1,"column":0},"end":{"line":41,"column":-333}},"locations":[{"start":{"line":1,"column":0},"end":{"line":41,"column":-333}}]}},"b":{"0":[0]},"fnMap":{"0":{"name":"(empty-report)","decl":{"start":{"line":1,"column":0},"end":{"line":41,"column":-333}},"loc":{"start":{"line":1,"column":0},"end":{"line":41,"column":-333}},"line":1}},"f":{"0":0}} ,"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/sync-one.ts": 
{"path":"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/sync-one.ts","all":true,"statementMap":{"0":{"start":{"line":1,"column":0},"end":{"line":1,"column":30}},"1":{"start":{"line":2,"column":0},"end":{"line":2,"column":0}},"2":{"start":{"line":3,"column":0},"end":{"line":3,"column":37}},"3":{"start":{"line":4,"column":0},"end":{"line":4,"column":48}},"4":{"start":{"line":5,"column":0},"end":{"line":5,"column":15}},"5":{"start":{"line":6,"column":0},"end":{"line":6,"column":1}}},"s":{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0},"branchMap":{"0":{"type":"branch","line":1,"loc":{"start":{"line":1,"column":0},"end":{"line":6,"column":-17}},"locations":[{"start":{"line":1,"column":0},"end":{"line":6,"column":-17}}]}},"b":{"0":[0]},"fnMap":{"0":{"name":"(empty-report)","decl":{"start":{"line":1,"column":0},"end":{"line":6,"column":-17}},"loc":{"start":{"line":1,"column":0},"end":{"line":6,"column":-17}},"line":1}},"f":{"0":0}} ,"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/sync.ts": 
{"path":"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/sync.ts","all":true,"statementMap":{"0":{"start":{"line":1,"column":0},"end":{"line":1,"column":30}},"1":{"start":{"line":2,"column":0},"end":{"line":2,"column":26}},"2":{"start":{"line":3,"column":0},"end":{"line":3,"column":28}},"3":{"start":{"line":4,"column":0},"end":{"line":4,"column":36}},"4":{"start":{"line":5,"column":0},"end":{"line":5,"column":0}},"5":{"start":{"line":6,"column":0},"end":{"line":6,"column":45}},"6":{"start":{"line":7,"column":0},"end":{"line":7,"column":67}},"7":{"start":{"line":8,"column":0},"end":{"line":8,"column":0}},"8":{"start":{"line":9,"column":0},"end":{"line":9,"column":7}},"9":{"start":{"line":10,"column":0},"end":{"line":10,"column":30}},"10":{"start":{"line":11,"column":0},"end":{"line":11,"column":32}},"11":{"start":{"line":12,"column":0},"end":{"line":12,"column":11}},"12":{"start":{"line":13,"column":0},"end":{"line":13,"column":18}},"13":{"start":{"line":14,"column":0},"end":{"line":14,"column":16}},"14":{"start":{"line":15,"column":0},"end":{"line":15,"column":81}},"15":{"start":{"line":16,"column":0},"end":{"line":16,"column":8}},"16":{"start":{"line":17,"column":0},"end":{"line":17,"column":6}},"17":{"start":{"line":18,"column":0},"end":{"line":18,"column":20}},"18":{"start":{"line":19,"column":0},"end":{"line":19,"column":3}},"19":{"start":{"line":20,"column":0},"end":{"line":20,"column":0}},"20":{"start":{"line":21,"column":0},"end":{"line":21,"column":48}},"21":{"start":{"line":22,"column":0},"end":{"line":22,"column":0}},"22":{"start":{"line":23,"column":0},"end":{"line":23,"column":7}},"23":{"start":{"line":24,"column":0},"end":{"line":24,"column":39}},"24":{"start":{"line":25,"column":0},"end":{"line":25,"column":23}},"25":{"start":{"line":26,"column":0},"end":{"line":26,"column":25}},"26":{"start":{"line":27,"column":0},"end":{"line":27,"column":7}},"27":{"start":{"line":28,"column":0},"end":{"line":28,"column":0}},"28":{"start":{"line":29,"c
olumn":0},"end":{"line":29,"column":60}},"29":{"start":{"line":30,"column":0},"end":{"line":30,"column":19}},"30":{"start":{"line":31,"column":0},"end":{"line":31,"column":18}},"31":{"start":{"line":32,"column":0},"end":{"line":32,"column":16}},"32":{"start":{"line":33,"column":0},"end":{"line":33,"column":81}},"33":{"start":{"line":34,"column":0},"end":{"line":34,"column":8}},"34":{"start":{"line":35,"column":0},"end":{"line":35,"column":6}},"35":{"start":{"line":36,"column":0},"end":{"line":36,"column":20}},"36":{"start":{"line":37,"column":0},"end":{"line":37,"column":3}},"37":{"start":{"line":38,"column":0},"end":{"line":38,"column":1}}},"s":{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0},"branchMap":{"0":{"type":"branch","line":1,"loc":{"start":{"line":1,"column":0},"end":{"line":38,"column":-315}},"locations":[{"start":{"line":1,"column":0},"end":{"line":38,"column":-315}}]}},"b":{"0":[0]},"fnMap":{"0":{"name":"(empty-report)","decl":{"start":{"line":1,"column":0},"end":{"line":38,"column":-315}},"loc":{"start":{"line":1,"column":0},"end":{"line":38,"column":-315}},"line":1}},"f":{"0":0}} ,"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/visualize.ts": 
{"path":"/Users/dex/go/src/github.com/dexhorthy/repomirror/src/commands/visualize.ts","all":true,"statementMap":{"0":{"start":{"line":1,"column":0},"end":{"line":1,"column":48}},"1":{"start":{"line":2,"column":0},"end":{"line":2,"column":0}},"2":{"start":{"line":3,"column":0},"end":{"line":3,"column":16}},"3":{"start":{"line":4,"column":0},"end":{"line":4,"column":19}},"4":{"start":{"line":5,"column":0},"end":{"line":5,"column":20}},"5":{"start":{"line":6,"column":0},"end":{"line":6,"column":17}},"6":{"start":{"line":7,"column":0},"end":{"line":7,"column":18}},"7":{"start":{"line":8,"column":0},"end":{"line":8,"column":20}},"8":{"start":{"line":9,"column":0},"end":{"line":9,"column":21}},"9":{"start":{"line":10,"column":0},"end":{"line":10,"column":19}},"10":{"start":{"line":11,"column":0},"end":{"line":11,"column":22}},"11":{"start":{"line":12,"column":0},"end":{"line":12,"column":19}},"12":{"start":{"line":13,"column":0},"end":{"line":13,"column":2}},"13":{"start":{"line":14,"column":0},"end":{"line":14,"column":0}},"14":{"start":{"line":15,"column":0},"end":{"line":15,"column":45}},"15":{"start":{"line":16,"column":0},"end":{"line":16,"column":17}},"16":{"start":{"line":17,"column":0},"end":{"line":17,"column":18}},"17":{"start":{"line":18,"column":0},"end":{"line":18,"column":28}},"18":{"start":{"line":19,"column":0},"end":{"line":19,"column":16}},"19":{"start":{"line":20,"column":0},"end":{"line":20,"column":25}},"20":{"start":{"line":21,"column":0},"end":{"line":21,"column":21}},"21":{"start":{"line":22,"column":0},"end":{"line":22,"column":26}},"22":{"start":{"line":23,"column":0},"end":{"line":23,"column":20}},"23":{"start":{"line":24,"column":0},"end":{"line":24,"column":25}},"24":{"start":{"line":25,"column":0},"end":{"line":25,"column":23}},"25":{"start":{"line":26,"column":0},"end":{"line":26,"column":27}},"26":{"start":{"line":27,"column":0},"end":{"line":27,"column":19}},"27":{"start":{"line":28,"column":0},"end":{"line":28,"column":24}},"28":{"start":
{"line":29,"column":0},"end":{"line":29,"column":16}},"29":{"start":{"line":30,"column":0},"end":{"line":30,"column":26}},"30":{"start":{"line":31,"column":0},"end":{"line":31,"column":12}},"31":{"start":{"line":32,"column":0},"end":{"line":32,"column":26}},"32":{"start":{"line":33,"column":0},"end":{"line":33,"column":3}},"33":{"start":{"line":34,"column":0},"end":{"line":34,"column":1}},"34":{"start":{"line":35,"column":0},"end":{"line":35,"column":0}},"35":{"start":{"line":36,"column":0},"end":{"line":36,"column":16}},"36":{"start":{"line":37,"column":0},"end":{"line":37,"column":17}},"37":{"start":{"line":38,"column":0},"end":{"line":38,"column":18}},"38":{"start":{"line":39,"column":0},"end":{"line":39,"column":20}},"39":{"start":{"line":40,"column":0},"end":{"line":40,"column":1}},"40":{"start":{"line":41,"column":0},"end":{"line":41,"column":0}},"41":{"start":{"line":42,"column":0},"end":{"line":42,"column":48}},"42":{"start":{"line":43,"column":0},"end":{"line":43,"column":84}},"43":{"start":{"line":44,"column":0},"end":{"line":44,"column":0}},"44":{"start":{"line":45,"column":0},"end":{"line":45,"column":24}},"45":{"start":{"line":46,"column":0},"end":{"line":46,"column":41}},"46":{"start":{"line":47,"column":0},"end":{"line":47,"column":47}},"47":{"start":{"line":48,"column":0},"end":{"line":48,"column":26}},"48":{"start":{"line":49,"column":0},"end":{"line":49,"column":4}},"49":{"start":{"line":50,"column":0},"end":{"line":50,"column":0}},"50":{"start":{"line":51,"column":0},"end":{"line":51,"column":23}},"51":{"start":{"line":52,"column":0},"end":{"line":52,"column":19}},"52":{"start":{"line":53,"column":0},"end":{"line":53,"column":22}},"53":{"start":{"line":54,"column":0},"end":{"line":54,"column":18}},"54":{"start":{"line":55,"column":0},"end":{"line":55,"column":4}},"55":{"start":{"line":56,"column":0},"end":{"line":56,"column":0}},"56":{"start":{"line":57,"column":0},"end":{"line":57,"column":26}},"57":{"start":{"line":58,"column":0},"end":{"line":5
8,"column":21}},"58":{"start":{"line":59,"column":0},"end":{"line":59,"column":26}},"59":{"start":{"line":60,"column":0},"end":{"line":60,"column":20}},"60":{"start":{"line":61,"column":0},"end":{"line":61,"column":4}},"61":{"start":{"line":62,"column":0},"end":{"line":62,"column":0}},"62":{"start":{"line":63,"column":0},"end":{"line":63,"column":34}},"63":{"start":{"line":64,"column":0},"end":{"line":64,"column":23}},"64":{"start":{"line":65,"column":0},"end":{"line":65,"column":77}},"65":{"start":{"line":66,"column":0},"end":{"line":66,"column":22}},"66":{"start":{"line":67,"column":0},"end":{"line":67,"column":66}},"67":{"start":{"line":68,"column":0},"end":{"line":68,"column":25}},"68":{"start":{"line":69,"column":0},"end":{"line":69,"column":69}},"69":{"start":{"line":70,"column":0},"end":{"line":70,"column":19}},"70":{"start":{"line":71,"column":0},"end":{"line":71,"column":62}},"71":{"start":{"line":72,"column":0},"end":{"line":72,"column":0}},"72":{"start":{"line":73,"column":0},"end":{"line":73,"column":89}},"73":{"start":{"line":74,"column":0},"end":{"line":74,"column":67}},"74":{"start":{"line":75,"column":0},"end":{"line":75,"column":0}},"75":{"start":{"line":76,"column":0},"end":{"line":76,"column":40}},"76":{"start":{"line":77,"column":0},"end":{"line":77,"column":75}},"77":{"start":{"line":78,"column":0},"end":{"line":78,"column":5}},"78":{"start":{"line":79,"column":0},"end":{"line":79,"column":0}},"79":{"start":{"line":80,"column":0},"end":{"line":80,"column":19}},"80":{"start":{"line":81,"column":0},"end":{"line":81,"column":5}},"81":{"start":{"line":82,"column":0},"end":{"line":82,"column":0}},"82":{"start":{"line":83,"column":0},"end":{"line":83,"column":22}},"83":{"start":{"line":84,"column":0},"end":{"line":84,"column":73}},"84":{"start":{"line":85,"column":0},"end":{"line":85,"column":76}},"85":{"start":{"line":86,"column":0},"end":{"line":86,"column":69}},"86":{"start":{"line":87,"column":0},"end":{"line":87,"column":0}},"87":{"start":{"line"
:88,"column":0},"end":{"line":88,"column":97}},"88":{"start":{"line":89,"column":0},"end":{"line":89,"column":81}},"89":{"start":{"line":90,"column":0},"end":{"line":90,"column":78}},"90":{"start":{"line":91,"column":0},"end":{"line":91,"column":99}},"91":{"start":{"line":92,"column":0},"end":{"line":92,"column":0}},"92":{"start":{"line":93,"column":0},"end":{"line":93,"column":16}},"93":{"start":{"line":94,"column":0},"end":{"line":94,"column":1}},"94":{"start":{"line":95,"column":0},"end":{"line":95,"column":0}},"95":{"start":{"line":96,"column":0},"end":{"line":96,"column":43}},"96":{"start":{"line":97,"column":0},"end":{"line":97,"column":38}},"97":{"start":{"line":98,"column":0},"end":{"line":98,"column":39}},"98":{"start":{"line":99,"column":0},"end":{"line":99,"column":0}},"99":{"start":{"line":100,"column":0},"end":{"line":100,"column":94}},"100":{"start":{"line":101,"column":0},"end":{"line":101,"column":0}},"101":{"start":{"line":102,"column":0},"end":{"line":102,"column":41}},"102":{"start":{"line":103,"column":0},"end":{"line":103,"column":6}},"103":{"start":{"line":104,"column":0},"end":{"line":104,"column":27}},"104":{"start":{"line":105,"column":0},"end":{"line":105,"column":52}},"105":{"start":{"line":106,"column":0},"end":{"line":106,"column":5}},"106":{"start":{"line":107,"column":0},"end":{"line":107,"column":52}},"107":{"start":{"line":108,"column":0},"end":{"line":108,"column":61}},"108":{"start":{"line":109,"column":0},"end":{"line":109,"column":45}},"109":{"start":{"line":110,"column":0},"end":{"line":110,"column":5}},"110":{"start":{"line":111,"column":0},"end":{"line":111,"column":3}},"111":{"start":{"line":112,"column":0},"end":{"line":112,"column":0}},"112":{"start":{"line":113,"column":0},"end":{"line":113,"column":30}},"113":{"start":{"line":114,"column":0},"end":{"line":114,"column":65}},"114":{"start":{"line":115,"column":0},"end":{"line":115,"column":50}},"115":{"start":{"line":116,"column":0},"end":{"line":116,"column":52}},"116":{"s
tart":{"line":117,"column":0},"end":{"line":117,"column":0}},"117":{"start":{"line":118,"column":0},"end":{"line":118,"column":42}},"118":{"start":{"line":119,"column":0},"end":{"line":119,"column":65}},"119":{"start":{"line":120,"column":0},"end":{"line":120,"column":0}},"120":{"start":{"line":121,"column":0},"end":{"line":121,"column":20}},"121":{"start":{"line":122,"column":0},"end":{"line":122,"column":25}},"122":{"start":{"line":123,"column":0},"end":{"line":123,"column":0}},"123":{"start":{"line":124,"column":0},"end":{"line":124,"column":63}},"124":{"start":{"line":125,"column":0},"end":{"line":125,"column":65}},"125":{"start":{"line":126,"column":0},"end":{"line":126,"column":60}},"126":{"start":{"line":127,"column":0},"end":{"line":127,"column":73}},"127":{"start":{"line":128,"column":0},"end":{"line":128,"column":66}},"128":{"start":{"line":129,"column":0},"end":{"line":129,"column":58}},"129":{"start":{"line":130,"column":0},"end":{"line":130,"column":69}},"130":{"start":{"line":131,"column":0},"end":{"line":131,"column":74}},"131":{"start":{"line":132,"column":0},"end":{"line":132,"column":32}},"132":{"start":{"line":133,"column":0},"end":{"line":133,"column":66}},"133":{"start":{"line":134,"column":0},"end":{"line":134,"column":58}},"134":{"start":{"line":135,"column":0},"end":{"line":135,"column":0}},"135":{"start":{"line":136,"column":0},"end":{"line":136,"column":31}},"136":{"start":{"line":137,"column":0},"end":{"line":137,"column":71}},"137":{"start":{"line":138,"column":0},"end":{"line":138,"column":7}},"138":{"start":{"line":139,"column":0},"end":{"line":139,"column":5}},"139":{"start":{"line":140,"column":0},"end":{"line":140,"column":0}},"140":{"start":{"line":141,"column":0},"end":{"line":141,"column":32}},"141":{"start":{"line":142,"column":0},"end":{"line":142,"column":0}},"142":{"start":{"line":143,"column":0},"end":{"line":143,"column":64}},"143":{"start":{"line":144,"column":0},"end":{"line":144,"column":20}},"144":{"start":{"line":145,"c
olumn":0},"end":{"line":145,"column":32}},"145":{"start":{"line":146,"column":0},"end":{"line":146,"column":0}},"146":{"start":{"line":147,"column":0},"end":{"line":147,"column":49}},"147":{"start":{"line":148,"column":0},"end":{"line":148,"column":53}},"148":{"start":{"line":149,"column":0},"end":{"line":149,"column":7}},"149":{"start":{"line":150,"column":0},"end":{"line":150,"column":76}},"150":{"start":{"line":151,"column":0},"end":{"line":151,"column":79}},"151":{"start":{"line":152,"column":0},"end":{"line":152,"column":28}},"152":{"start":{"line":153,"column":0},"end":{"line":153,"column":61}},"153":{"start":{"line":154,"column":0},"end":{"line":154,"column":57}},"154":{"start":{"line":155,"column":0},"end":{"line":155,"column":28}},"155":{"start":{"line":156,"column":0},"end":{"line":156,"column":115}},"156":{"start":{"line":157,"column":0},"end":{"line":157,"column":10}},"157":{"start":{"line":158,"column":0},"end":{"line":158,"column":7}},"158":{"start":{"line":159,"column":0},"end":{"line":159,"column":28}},"159":{"start":{"line":160,"column":0},"end":{"line":160,"column":63}},"160":{"start":{"line":161,"column":0},"end":{"line":161,"column":0}},"161":{"start":{"line":162,"column":0},"end":{"line":162,"column":38}},"162":{"start":{"line":163,"column":0},"end":{"line":163,"column":84}},"163":{"start":{"line":164,"column":0},"end":{"line":164,"column":7}},"164":{"start":{"line":165,"column":0},"end":{"line":165,"column":5}},"165":{"start":{"line":166,"column":0},"end":{"line":166,"column":51}},"166":{"start":{"line":167,"column":0},"end":{"line":167,"column":60}},"167":{"start":{"line":168,"column":0},"end":{"line":168,"column":61}},"168":{"start":{"line":169,"column":0},"end":{"line":169,"column":44}},"169":{"start":{"line":170,"column":0},"end":{"line":170,"column":41}},"170":{"start":{"line":171,"column":0},"end":{"line":171,"column":51}},"171":{"start":{"line":172,"column":0},"end":{"line":172,"column":62}},"172":{"start":{"line":173,"column":0},"end":{
"line":173,"column":0}},"173":{"start":{"line":174,"column":0},"end":{"line":174,"column":46}},"174":{"start":{"line":175,"column":0},"end":{"line":175,"column":28}},"175":{"start":{"line":176,"column":0},"end":{"line":176,"column":26}},"176":{"start":{"line":177,"column":0},"end":{"line":177,"column":45}},"177":{"start":{"line":178,"column":0},"end":{"line":178,"column":29}},"178":{"start":{"line":179,"column":0},"end":{"line":179,"column":46}},"179":{"start":{"line":180,"column":0},"end":{"line":180,"column":45}},"180":{"start":{"line":181,"column":0},"end":{"line":181,"column":40}},"181":{"start":{"line":182,"column":0},"end":{"line":182,"column":93}},"182":{"start":{"line":183,"column":0},"end":{"line":183,"column":31}},"183":{"start":{"line":184,"column":0},"end":{"line":184,"column":57}},"184":{"start":{"line":185,"column":0},"end":{"line":185,"column":9}},"185":{"start":{"line":186,"column":0},"end":{"line":186,"column":0}},"186":{"start":{"line":187,"column":0},"end":{"line":187,"column":40}},"187":{"start":{"line":188,"column":0},"end":{"line":188,"column":50}},"188":{"start":{"line":189,"column":0},"end":{"line":189,"column":71}},"189":{"start":{"line":190,"column":0},"end":{"line":190,"column":9}},"190":{"start":{"line":191,"column":0},"end":{"line":191,"column":50}},"191":{"start":{"line":192,"column":0},"end":{"line":192,"column":70}},"192":{"start":{"line":193,"column":0},"end":{"line":193,"column":9}},"193":{"start":{"line":194,"column":0},"end":{"line":194,"column":7}},"194":{"start":{"line":195,"column":0},"end":{"line":195,"column":30}},"195":{"start":{"line":196,"column":0},"end":{"line":196,"column":49}},"196":{"start":{"line":197,"column":0},"end":{"line":197,"column":90}},"197":{"start":{"line":198,"column":0},"end":{"line":198,"column":5}},"198":{"start":{"line":199,"column":0},"end":{"line":199,"column":49}},"199":{"start":{"line":200,"column":0},"end":{"line":200,"column":62}},"200":{"start":{"line":201,"column":0},"end":{"line":201,"column"
:3}},"201":{"start":{"line":202,"column":0},"end":{"line":202,"column":0}},"202":{"start":{"line":203,"column":0},"end":{"line":203,"column":48}},"203":{"start":{"line":204,"column":0},"end":{"line":204,"column":54}},"204":{"start":{"line":205,"column":0},"end":{"line":205,"column":50}},"205":{"start":{"line":206,"column":0},"end":{"line":206,"column":36}},"206":{"start":{"line":207,"column":0},"end":{"line":207,"column":6}},"207":{"start":{"line":208,"column":0},"end":{"line":208,"column":28}},"208":{"start":{"line":209,"column":0},"end":{"line":209,"column":83}},"209":{"start":{"line":210,"column":0},"end":{"line":210,"column":67}},"210":{"start":{"line":211,"column":0},"end":{"line":211,"column":29}},"211":{"start":{"line":212,"column":0},"end":{"line":212,"column":68}},"212":{"start":{"line":213,"column":0},"end":{"line":213,"column":7}},"213":{"start":{"line":214,"column":0},"end":{"line":214,"column":29}},"214":{"start":{"line":215,"column":0},"end":{"line":215,"column":68}},"215":{"start":{"line":216,"column":0},"end":{"line":216,"column":7}},"216":{"start":{"line":217,"column":0},"end":{"line":217,"column":52}},"217":{"start":{"line":218,"column":0},"end":{"line":218,"column":60}},"218":{"start":{"line":219,"column":0},"end":{"line":219,"column":7}},"219":{"start":{"line":220,"column":0},"end":{"line":220,"column":5}},"220":{"start":{"line":221,"column":0},"end":{"line":221,"column":3}},"221":{"start":{"line":222,"column":0},"end":{"line":222,"column":0}},"222":{"start":{"line":223,"column":0},"end":{"line":223,"column":21}},"223":{"start":{"line":224,"column":0},"end":{"line":224,"column":19}},"224":{"start":{"line":225,"column":0},"end":{"line":225,"column":28}},"225":{"start":{"line":226,"column":0},"end":{"line":226,"column":37}},"226":{"start":{"line":227,"column":0},"end":{"line":227,"column":78}},"227":{"start":{"line":228,"column":0},"end":{"line":228,"column":62}},"228":{"start":{"line":229,"column":0},"end":{"line":229,"column":51}},"229":{"start":
{"line":230,"column":0},"end":{"line":230,"column":45}},"230":{"start":{"line":231,"column":0},"end":{"line":231,"column":61}},"231":{"start":{"line":232,"column":0},"end":{"line":232,"column":34}},"232":{"start":{"line":233,"column":0},"end":{"line":233,"column":53}},"233":{"start":{"line":234,"column":0},"end":{"line":234,"column":3}},"234":{"start":{"line":235,"column":0},"end":{"line":235,"column":0}},"235":{"start":{"line":236,"column":0},"end":{"line":236,"column":16}},"236":{"start":{"line":237,"column":0},"end":{"line":237,"column":62}},"237":{"start":{"line":238,"column":0},"end":{"line":238,"column":3}},"238":{"start":{"line":239,"column":0},"end":{"line":239,"column":0}},"239":{"start":{"line":240,"column":0},"end":{"line":240,"column":16}},"240":{"start":{"line":241,"column":0},"end":{"line":241,"column":1}},"241":{"start":{"line":242,"column":0},"end":{"line":242,"column":0}},"242":{"start":{"line":243,"column":0},"end":{"line":243,"column":35}},"243":{"start":{"line":244,"column":0},"end":{"line":244,"column":16}},"244":{"start":{"line":245,"column":0},"end":{"line":245,"column":20}},"245":{"start":{"line":246,"column":0},"end":{"line":246,"column":22}},"246":{"start":{"line":247,"column":0},"end":{"line":247,"column":24}},"247":{"start":{"line":248,"column":0},"end":{"line":248,"column":26}},"248":{"start":{"line":249,"column":0},"end":{"line":249,"column":3}},"249":{"start":{"line":250,"column":0},"end":{"line":250,"column":33}},"250":{"start":{"line":251,"column":0},"end":{"line":251,"column":75}},"251":{"start":{"line":252,"column":0},"end":{"line":252,"column":0}},"252":{"start":{"line":253,"column":0},"end":{"line":253,"column":23}},"253":{"start":{"line":254,"column":0},"end":{"line":254,"column":55}},"254":{"start":{"line":255,"column":0},"end":{"line":255,"column":38}},"255":{"start":{"line":256,"column":0},"end":{"line":256,"column":41}},"256":{"start":{"line":257,"column":0},"end":{"line":257,"column":58}},"257":{"start":{"line":258,"column"
:0},"end":{"line":258,"column":0}},"258":{"start":{"line":259,"column":0},"end":{"line":259,"column":23}},"259":{"start":{"line":260,"column":0},"end":{"line":260,"column":81}},"260":{"start":{"line":261,"column":0},"end":{"line":261,"column":4}},"261":{"start":{"line":262,"column":0},"end":{"line":262,"column":0}},"262":{"start":{"line":263,"column":0},"end":{"line":263,"column":27}},"263":{"start":{"line":264,"column":0},"end":{"line":264,"column":22}},"264":{"start":{"line":265,"column":0},"end":{"line":265,"column":44}},"265":{"start":{"line":266,"column":0},"end":{"line":266,"column":28}},"266":{"start":{"line":267,"column":0},"end":{"line":267,"column":45}},"267":{"start":{"line":268,"column":0},"end":{"line":268,"column":41}},"268":{"start":{"line":269,"column":0},"end":{"line":269,"column":36}},"269":{"start":{"line":270,"column":0},"end":{"line":270,"column":0}},"270":{"start":{"line":271,"column":0},"end":{"line":271,"column":25}},"271":{"start":{"line":272,"column":0},"end":{"line":272,"column":77}},"272":{"start":{"line":273,"column":0},"end":{"line":273,"column":6}},"273":{"start":{"line":274,"column":0},"end":{"line":274,"column":0}},"274":{"start":{"line":275,"column":0},"end":{"line":275,"column":18}},"275":{"start":{"line":276,"column":0},"end":{"line":276,"column":65}},"276":{"start":{"line":277,"column":0},"end":{"line":277,"column":5}},"277":{"start":{"line":278,"column":0},"end":{"line":278,"column":0}},"278":{"start":{"line":279,"column":0},"end":{"line":279,"column":37}},"279":{"start":{"line":280,"column":0},"end":{"line":280,"column":50}},"280":{"start":{"line":281,"column":0},"end":{"line":281,"column":43}},"281":{"start":{"line":282,"column":0},"end":{"line":282,"column":28}},"282":{"start":{"line":283,"column":0},"end":{"line":283,"column":62}},"283":{"start":{"line":284,"column":0},"end":{"line":284,"column":80}},"284":{"start":{"line":285,"column":0},"end":{"line":285,"column":7}},"285":{"start":{"line":286,"column":0},"end":{"line":286
,"column":5}},"286":{"start":{"line":287,"column":0},"end":{"line":287,"column":0}},"287":{"start":{"line":288,"column":0},"end":{"line":288,"column":37}},"288":{"start":{"line":289,"column":0},"end":{"line":289,"column":27}},"289":{"start":{"line":290,"column":0},"end":{"line":290,"column":92}},"290":{"start":{"line":291,"column":0},"end":{"line":291,"column":8}},"291":{"start":{"line":292,"column":0},"end":{"line":292,"column":5}},"292":{"start":{"line":293,"column":0},"end":{"line":293,"column":3}},"293":{"start":{"line":294,"column":0},"end":{"line":294,"column":0}},"294":{"start":{"line":295,"column":0},"end":{"line":295,"column":31}},"295":{"start":{"line":296,"column":0},"end":{"line":296,"column":1}},"296":{"start":{"line":297,"column":0},"end":{"line":297,"column":0}},"297":{"start":{"line":298,"column":0},"end":{"line":298,"column":32}},"298":{"start":{"line":299,"column":0},"end":{"line":299,"column":36}},"299":{"start":{"line":300,"column":0},"end":{"line":300,"column":18}},"300":{"start":{"line":301,"column":0},"end":{"line":301,"column":30}},"301":{"start":{"line":302,"column":0},"end":{"line":302,"column":25}},"302":{"start":{"line":303,"column":0},"end":{"line":303,"column":24}},"303":{"start":{"line":304,"column":0},"end":{"line":304,"column":5}},"304":{"start":{"line":305,"column":0},"end":{"line":305,"column":0}},"305":{"start":{"line":306,"column":0},"end":{"line":306,"column":70}},"306":{"start":{"line":307,"column":0},"end":{"line":307,"column":62}},"307":{"start":{"line":308,"column":0},"end":{"line":308,"column":81}},"308":{"start":{"line":309,"column":0},"end":{"line":309,"column":74}},"309":{"start":{"line":310,"column":0},"end":{"line":310,"column":37}},"310":{"start":{"line":311,"column":0},"end":{"line":311,"column":0}},"311":{"start":{"line":312,"column":0},"end":{"line":312,"column":27}},"312":{"start":{"line":313,"column":0},"end":{"line":313,"column":22}},"313":{"start":{"line":314,"column":0},"end":{"line":314,"column":33}},"314":{"
start":{"line":315,"column":0},"end":{"line":315,"column":70}},"315":{"start":{"line":316,"column":0},"end":{"line":316,"column":13}},"316":{"start":{"line":317,"column":0},"end":{"line":317,"column":0}},"317":{"start":{"line":318,"column":0},"end":{"line":318,"column":11}},"318":{"start":{"line":319,"column":0},"end":{"line":319,"column":38}},"319":{"start":{"line":320,"column":0},"end":{"line":320,"column":0}},"320":{"start":{"line":321,"column":0},"end":{"line":321,"column":39}},"321":{"start":{"line":322,"column":0},"end":{"line":322,"column":74}},"322":{"start":{"line":323,"column":0},"end":{"line":323,"column":51}},"323":{"start":{"line":324,"column":0},"end":{"line":324,"column":37}},"324":{"start":{"line":325,"column":0},"end":{"line":325,"column":0}},"325":{"start":{"line":326,"column":0},"end":{"line":326,"column":32}},"326":{"start":{"line":327,"column":0},"end":{"line":327,"column":33}},"327":{"start":{"line":328,"column":0},"end":{"line":328,"column":27}},"328":{"start":{"line":329,"column":0},"end":{"line":329,"column":33}},"329":{"start":{"line":330,"column":0},"end":{"line":330,"column":13}},"330":{"start":{"line":331,"column":0},"end":{"line":331,"column":0}},"331":{"start":{"line":332,"column":0},"end":{"line":332,"column":65}},"332":{"start":{"line":333,"column":0},"end":{"line":333,"column":43}},"333":{"start":{"line":334,"column":0},"end":{"line":334,"column":54}},"334":{"start":{"line":335,"column":0},"end":{"line":335,"column":38}},"335":{"start":{"line":336,"column":0},"end":{"line":336,"column":23}},"336":{"start":{"line":337,"column":0},"end":{"line":337,"column":19}},"337":{"start":{"line":338,"column":0},"end":{"line":338,"column":32}},"338":{"start":{"line":339,"column":0},"end":{"line":339,"column":31}},"339":{"start":{"line":340,"column":0},"end":{"line":340,"column":24}},"340":{"start":{"line":341,"column":0},"end":{"line":341,"column":14}},"341":{"start":{"line":342,"column":0},"end":{"line":342,"column":42}},"342":{"start":{"line":3
43,"column":0},"end":{"line":343,"column":18}},"343":{"start":{"line":344,"column":0},"end":{"line":344,"column":59}},"344":{"start":{"line":345,"column":0},"end":{"line":345,"column":73}},"345":{"start":{"line":346,"column":0},"end":{"line":346,"column":33}},"346":{"start":{"line":347,"column":0},"end":{"line":347,"column":75}},"347":{"start":{"line":348,"column":0},"end":{"line":348,"column":14}},"348":{"start":{"line":349,"column":0},"end":{"line":349,"column":11}},"349":{"start":{"line":350,"column":0},"end":{"line":350,"column":9}},"350":{"start":{"line":351,"column":0},"end":{"line":351,"column":41}},"351":{"start":{"line":352,"column":0},"end":{"line":352,"column":17}},"352":{"start":{"line":353,"column":0},"end":{"line":353,"column":33}},"353":{"start":{"line":354,"column":0},"end":{"line":354,"column":60}},"354":{"start":{"line":355,"column":0},"end":{"line":355,"column":11}},"355":{"start":{"line":356,"column":0},"end":{"line":356,"column":53}},"356":{"start":{"line":357,"column":0},"end":{"line":357,"column":48}},"357":{"start":{"line":358,"column":0},"end":{"line":358,"column":0}},"358":{"start":{"line":359,"column":0},"end":{"line":359,"column":38}},"359":{"start":{"line":360,"column":0},"end":{"line":360,"column":68}},"360":{"start":{"line":361,"column":0},"end":{"line":361,"column":49}},"361":{"start":{"line":362,"column":0},"end":{"line":362,"column":38}},"362":{"start":{"line":363,"column":0},"end":{"line":363,"column":49}},"363":{"start":{"line":364,"column":0},"end":{"line":364,"column":30}},"364":{"start":{"line":365,"column":0},"end":{"line":365,"column":19}},"365":{"start":{"line":366,"column":0},"end":{"line":366,"column":31}},"366":{"start":{"line":367,"column":0},"end":{"line":367,"column":24}},"367":{"start":{"line":368,"column":0},"end":{"line":368,"column":14}},"368":{"start":{"line":369,"column":0},"end":{"line":369,"column":37}},"369":{"start":{"line":370,"column":0},"end":{"line":370,"column":18}},"370":{"start":{"line":371,"column":0}
,"end":{"line":371,"column":58}},"371":{"start":{"line":372,"column":0},"end":{"line":372,"column":40}},"372":{"start":{"line":373,"column":0},"end":{"line":373,"column":31}},"373":{"start":{"line":374,"column":0},"end":{"line":374,"column":35}},"374":{"start":{"line":375,"column":0},"end":{"line":375,"column":15}},"375":{"start":{"line":376,"column":0},"end":{"line":376,"column":11}},"376":{"start":{"line":377,"column":0},"end":{"line":377,"column":9}},"377":{"start":{"line":378,"column":0},"end":{"line":378,"column":71}},"378":{"start":{"line":379,"column":0},"end":{"line":379,"column":57}},"379":{"start":{"line":380,"column":0},"end":{"line":380,"column":73}},"380":{"start":{"line":381,"column":0},"end":{"line":381,"column":31}},"381":{"start":{"line":382,"column":0},"end":{"line":382,"column":85}},"382":{"start":{"line":383,"column":0},"end":{"line":383,"column":12}},"383":{"start":{"line":384,"column":0},"end":{"line":384,"column":51}},"384":{"start":{"line":385,"column":0},"end":{"line":385,"column":9}},"385":{"start":{"line":386,"column":0},"end":{"line":386,"column":56}},"386":{"start":{"line":387,"column":0},"end":{"line":387,"column":14}},"387":{"start":{"line":388,"column":0},"end":{"line":388,"column":73}},"388":{"start":{"line":389,"column":0},"end":{"line":389,"column":9}},"389":{"start":{"line":390,"column":0},"end":{"line":390,"column":0}},"390":{"start":{"line":391,"column":0},"end":{"line":391,"column":60}},"391":{"start":{"line":392,"column":0},"end":{"line":392,"column":24}},"392":{"start":{"line":393,"column":0},"end":{"line":393,"column":32}},"393":{"start":{"line":394,"column":0},"end":{"line":394,"column":71}},"394":{"start":{"line":395,"column":0},"end":{"line":395,"column":24}},"395":{"start":{"line":396,"column":0},"end":{"line":396,"column":29}},"396":{"start":{"line":397,"column":0},"end":{"line":397,"column":68}},"397":{"start":{"line":398,"column":0},"end":{"line":398,"column":10}},"398":{"start":{"line":399,"column":0},"end":{"line":3
99,"column":29}},"399":{"start":{"line":400,"column":0},"end":{"line":400,"column":77}},"400":{"start":{"line":401,"column":0},"end":{"line":401,"column":10}},"401":{"start":{"line":402,"column":0},"end":{"line":402,"column":7}},"402":{"start":{"line":403,"column":0},"end":{"line":403,"column":5}},"403":{"start":{"line":404,"column":0},"end":{"line":404,"column":5}},"404":{"start":{"line":405,"column":0},"end":{"line":405,"column":0}},"405":{"start":{"line":406,"column":0},"end":{"line":406,"column":24}},"406":{"start":{"line":407,"column":0},"end":{"line":407,"column":95}},"407":{"start":{"line":408,"column":0},"end":{"line":408,"column":74}},"408":{"start":{"line":409,"column":0},"end":{"line":409,"column":27}},"409":{"start":{"line":410,"column":0},"end":{"line":410,"column":94}},"410":{"start":{"line":411,"column":0},"end":{"line":411,"column":8}},"411":{"start":{"line":412,"column":0},"end":{"line":412,"column":68}},"412":{"start":{"line":413,"column":0},"end":{"line":413,"column":5}},"413":{"start":{"line":414,"column":0},"end":{"line":414,"column":5}},"414":{"start":{"line":415,"column":0},"end":{"line":415,"column":1}}},"s":{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"101":0,"102":0,"103":0,"104":0,"105":0,"106":0,"107":0,"108":0,"109":0,"110":0,"111":0,"112":0,"113":0,"114":0,"115":0,"116":0,"117":0,"118":0,"119":0
,"120":0,"121":0,"122":0,"123":0,"124":0,"125":0,"126":0,"127":0,"128":0,"129":0,"130":0,"131":0,"132":0,"133":0,"134":0,"135":0,"136":0,"137":0,"138":0,"139":0,"140":0,"141":0,"142":0,"143":0,"144":0,"145":0,"146":0,"147":0,"148":0,"149":0,"150":0,"151":0,"152":0,"153":0,"154":0,"155":0,"156":0,"157":0,"158":0,"159":0,"160":0,"161":0,"162":0,"163":0,"164":0,"165":0,"166":0,"167":0,"168":0,"169":0,"170":0,"171":0,"172":0,"173":0,"174":0,"175":0,"176":0,"177":0,"178":0,"179":0,"180":0,"181":0,"182":0,"183":0,"184":0,"185":0,"186":0,"187":0,"188":0,"189":0,"190":0,"191":0,"192":0,"193":0,"194":0,"195":0,"196":0,"197":0,"198":0,"199":0,"200":0,"201":0,"202":0,"203":0,"204":0,"205":0,"206":0,"207":0,"208":0,"209":0,"210":0,"211":0,"212":0,"213":0,"214":0,"215":0,"216":0,"217":0,"218":0,"219":0,"220":0,"221":0,"222":0,"223":0,"224":0,"225":0,"226":0,"227":0,"228":0,"229":0,"230":0,"231":0,"232":0,"233":0,"234":0,"235":0,"236":0,"237":0,"238":0,"239":0,"240":0,"241":0,"242":0,"243":0,"244":0,"245":0,"246":0,"247":0,"248":0,"249":0,"250":0,"251":0,"252":0,"253":0,"254":0,"255":0,"256":0,"257":0,"258":0,"259":0,"260":0,"261":0,"262":0,"263":0,"264":0,"265":0,"266":0,"267":0,"268":0,"269":0,"270":0,"271":0,"272":0,"273":0,"274":0,"275":0,"276":0,"277":0,"278":0,"279":0,"280":0,"281":0,"282":0,"283":0,"284":0,"285":0,"286":0,"287":0,"288":0,"289":0,"290":0,"291":0,"292":0,"293":0,"294":0,"295":0,"296":0,"297":0,"298":0,"299":0,"300":0,"301":0,"302":0,"303":0,"304":0,"305":0,"306":0,"307":0,"308":0,"309":0,"310":0,"311":0,"312":0,"313":0,"314":0,"315":0,"316":0,"317":0,"318":0,"319":0,"320":0,"321":0,"322":0,"323":0,"324":0,"325":0,"326":0,"327":0,"328":0,"329":0,"330":0,"331":0,"332":0,"333":0,"334":0,"335":0,"336":0,"337":0,"338":0,"339":0,"340":0,"341":0,"342":0,"343":0,"344":0,"345":0,"346":0,"347":0,"348":0,"349":0,"350":0,"351":0,"352":0,"353":0,"354":0,"355":0,"356":0,"357":0,"358":0,"359":0,"360":0,"361":0,"362":0,"363":0,"364":0,"365":0,"366":0,"367":0,"368":0,"369":0
,"370":0,"371":0,"372":0,"373":0,"374":0,"375":0,"376":0,"377":0,"378":0,"379":0,"380":0,"381":0,"382":0,"383":0,"384":0,"385":0,"386":0,"387":0,"388":0,"389":0,"390":0,"391":0,"392":0,"393":0,"394":0,"395":0,"396":0,"397":0,"398":0,"399":0,"400":0,"401":0,"402":0,"403":0,"404":0,"405":0,"406":0,"407":0,"408":0,"409":0,"410":0,"411":0,"412":0,"413":0,"414":0},"branchMap":{"0":{"type":"branch","line":1,"loc":{"start":{"line":1,"column":13794},"end":{"line":415,"column":1}},"locations":[{"start":{"line":1,"column":13794},"end":{"line":415,"column":1}}]}},"b":{"0":[0]},"fnMap":{"0":{"name":"(empty-report)","decl":{"start":{"line":1,"column":13794},"end":{"line":415,"column":1}},"loc":{"start":{"line":1,"column":13794},"end":{"line":415,"column":1}},"line":1}},"f":{"0":0}} } ================================================ FILE: coverage/index.html ================================================ Code coverage report for All files

All files

0% Statements 0/794
0% Branches 0/6
0% Functions 0/6
0% Lines 0/794

Press n or j to go to the next uncovered block, b, p or k for the previous block.

File Statements Branches Functions Lines
src
0% 0/39 0% 0/1 0% 0/1 0% 0/39
src/commands
0% 0/755 0% 0/5 0% 0/5 0% 0/755
================================================ FILE: coverage/prettify.css ================================================ .pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee} ================================================ FILE: coverage/prettify.js ================================================ /* eslint-disable */ window.PR_SHOULD_USE_CONTINUATION=true;(function(){var h=["break,continue,do,else,for,if,return,while"];var u=[h,"auto,case,char,const,default,double,enum,extern,float,goto,int,long,register,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"];var p=[u,"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"];var l=[p,"alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,dynamic_cast,explicit,export,friend,inline,late_check,mutable,namespace,nullptr,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"];var x=[p,"abstract,boolean,byte,extends,final,finally,implements,import,instanceof,null,native,package,strictfp,super,synchronized,throws,transient"];var 
R=[x,"as,base,by,checked,decimal,delegate,descending,dynamic,event,fixed,foreach,from,group,implicit,in,interface,internal,into,is,lock,object,out,override,orderby,params,partial,readonly,ref,sbyte,sealed,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,var"];var r="all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,true,try,unless,until,when,while,yes";var w=[p,"debugger,eval,export,function,get,null,set,undefined,var,with,Infinity,NaN"];var s="caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END";var I=[h,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"];var f=[h,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"];var H=[h,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"];var A=[l,R,w,s+I,f,H];var e=/^(DIR|FILE|vector|(de|priority_)?queue|list|stack|(const_)?iterator|(multi)?(set|map)|bitset|u?(int|float)\d*)/;var C="str";var z="kwd";var j="com";var O="typ";var G="lit";var L="pun";var F="pln";var m="tag";var E="dec";var J="src";var P="atn";var n="atv";var N="nocode";var M="(?:^^\\.?|[+-]|\\!|\\!=|\\!==|\\#|\\%|\\%=|&|&&|&&=|&=|\\(|\\*|\\*=|\\+=|\\,|\\-=|\\->|\\/|\\/=|:|::|\\;|<|<<|<<=|<=|=|==|===|>|>=|>>|>>=|>>>|>>>=|\\?|\\@|\\[|\\^|\\^=|\\^\\^|\\^\\^=|\\{|\\||\\|=|\\|\\||\\|\\|=|\\~|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*";function k(Z){var ad=0;var S=false;var ac=false;for(var 
V=0,U=Z.length;V122)){if(!(al<65||ag>90)){af.push([Math.max(65,ag)|32,Math.min(al,90)|32])}if(!(al<97||ag>122)){af.push([Math.max(97,ag)&~32,Math.min(al,122)&~32])}}}}af.sort(function(av,au){return(av[0]-au[0])||(au[1]-av[1])});var ai=[];var ap=[NaN,NaN];for(var ar=0;arat[0]){if(at[1]+1>at[0]){an.push("-")}an.push(T(at[1]))}}an.push("]");return an.join("")}function W(al){var aj=al.source.match(new RegExp("(?:\\[(?:[^\\x5C\\x5D]|\\\\[\\s\\S])*\\]|\\\\u[A-Fa-f0-9]{4}|\\\\x[A-Fa-f0-9]{2}|\\\\[0-9]+|\\\\[^ux0-9]|\\(\\?[:!=]|[\\(\\)\\^]|[^\\x5B\\x5C\\(\\)\\^]+)","g"));var ah=aj.length;var an=[];for(var ak=0,am=0;ak=2&&ai==="["){aj[ak]=X(ag)}else{if(ai!=="\\"){aj[ak]=ag.replace(/[a-zA-Z]/g,function(ao){var ap=ao.charCodeAt(0);return"["+String.fromCharCode(ap&~32,ap|32)+"]"})}}}}return aj.join("")}var aa=[];for(var V=0,U=Z.length;V=0;){S[ac.charAt(ae)]=Y}}var af=Y[1];var aa=""+af;if(!ag.hasOwnProperty(aa)){ah.push(af);ag[aa]=null}}ah.push(/[\0-\uffff]/);V=k(ah)})();var X=T.length;var W=function(ah){var Z=ah.sourceCode,Y=ah.basePos;var ad=[Y,F];var af=0;var an=Z.match(V)||[];var aj={};for(var ae=0,aq=an.length;ae=5&&"lang-"===ap.substring(0,5);if(am&&!(ai&&typeof ai[1]==="string")){am=false;ap=J}if(!am){aj[ag]=ap}}var ab=af;af+=ag.length;if(!am){ad.push(Y+ab,ap)}else{var al=ai[1];var ak=ag.indexOf(al);var ac=ak+al.length;if(ai[2]){ac=ag.length-ai[2].length;ak=ac-al.length}var ar=ap.substring(5);B(Y+ab,ag.substring(0,ak),W,ad);B(Y+ab+ak,al,q(ar,al),ad);B(Y+ab+ac,ag.substring(ac),W,ad)}}ah.decorations=ad};return W}function i(T){var 
W=[],S=[];if(T.tripleQuotedStrings){W.push([C,/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,null,"'\""])}else{if(T.multiLineStrings){W.push([C,/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,null,"'\"`"])}else{W.push([C,/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,null,"\"'"])}}if(T.verbatimStrings){S.push([C,/^@\"(?:[^\"]|\"\")*(?:\"|$)/,null])}var Y=T.hashComments;if(Y){if(T.cStyleComments){if(Y>1){W.push([j,/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,null,"#"])}else{W.push([j,/^#(?:(?:define|elif|else|endif|error|ifdef|include|ifndef|line|pragma|undef|warning)\b|[^\r\n]*)/,null,"#"])}S.push([C,/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h|[a-z]\w*)>/,null])}else{W.push([j,/^#[^\r\n]*/,null,"#"])}}if(T.cStyleComments){S.push([j,/^\/\/[^\r\n]*/,null]);S.push([j,/^\/\*[\s\S]*?(?:\*\/|$)/,null])}if(T.regexLiterals){var X=("/(?=[^/*])(?:[^/\\x5B\\x5C]|\\x5C[\\s\\S]|\\x5B(?:[^\\x5C\\x5D]|\\x5C[\\s\\S])*(?:\\x5D|$))+/");S.push(["lang-regex",new RegExp("^"+M+"("+X+")")])}var V=T.types;if(V){S.push([O,V])}var U=(""+T.keywords).replace(/^ | $/g,"");if(U.length){S.push([z,new RegExp("^(?:"+U.replace(/[\s,]+/g,"|")+")\\b"),null])}W.push([F,/^\s+/,null," \r\n\t\xA0"]);S.push([G,/^@[a-z_$][a-z_$@0-9]*/i,null],[O,/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],[F,/^[a-z_$][a-z_$@0-9]*/i,null],[G,new RegExp("^(?:0x[a-f0-9]+|(?:\\d(?:_\\d+)*\\d*(?:\\.\\d*)?|\\.\\d\\+)(?:e[+\\-]?\\d+)?)[a-z]*","i"),null,"0123456789"],[F,/^\\[\s\S]?/,null],[L,/^.[^\s\w\.$@\'\"\`\/\#\\]*/,null]);return g(W,S)}var K=i({keywords:A,hashComments:true,cStyleComments:true,multiLineStrings:true,regexLiterals:true});function Q(V,ag){var U=/(?:^|\s)nocode(?:\s|$)/;var ab=/\r\n?|\n/;var ac=V.ownerDocument;var 
S;if(V.currentStyle){S=V.currentStyle.whiteSpace}else{if(window.getComputedStyle){S=ac.defaultView.getComputedStyle(V,null).getPropertyValue("white-space")}}var Z=S&&"pre"===S.substring(0,3);var af=ac.createElement("LI");while(V.firstChild){af.appendChild(V.firstChild)}var W=[af];function ae(al){switch(al.nodeType){case 1:if(U.test(al.className)){break}if("BR"===al.nodeName){ad(al);if(al.parentNode){al.parentNode.removeChild(al)}}else{for(var an=al.firstChild;an;an=an.nextSibling){ae(an)}}break;case 3:case 4:if(Z){var am=al.nodeValue;var aj=am.match(ab);if(aj){var ai=am.substring(0,aj.index);al.nodeValue=ai;var ah=am.substring(aj.index+aj[0].length);if(ah){var ak=al.parentNode;ak.insertBefore(ac.createTextNode(ah),al.nextSibling)}ad(al);if(!ai){al.parentNode.removeChild(al)}}}break}}function ad(ak){while(!ak.nextSibling){ak=ak.parentNode;if(!ak){return}}function ai(al,ar){var aq=ar?al.cloneNode(false):al;var ao=al.parentNode;if(ao){var ap=ai(ao,1);var an=al.nextSibling;ap.appendChild(aq);for(var am=an;am;am=an){an=am.nextSibling;ap.appendChild(am)}}return aq}var ah=ai(ak.nextSibling,0);for(var aj;(aj=ah.parentNode)&&aj.nodeType===1;){ah=aj}W.push(ah)}for(var Y=0;Y=S){ah+=2}if(V>=ap){Z+=2}}}var t={};function c(U,V){for(var S=V.length;--S>=0;){var T=V[S];if(!t.hasOwnProperty(T)){t[T]=U}else{if(window.console){console.warn("cannot override language handler %s",T)}}}}function q(T,S){if(!(T&&t.hasOwnProperty(T))){T=/^\s*]*(?:>|$)/],[j,/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],[L,/^(?:<[%?]|[%?]>)/],["lang-",/^]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),["default-markup","htm","html","mxml","xhtml","xml","xsl"]);c(g([[F,/^[\s]+/,null," 
\t\r\n"],[n,/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null,"\"'"]],[[m,/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],[P,/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],[L,/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);c(g([],[[n,/^[\s\S]+/]]),["uq.val"]);c(i({keywords:l,hashComments:true,cStyleComments:true,types:e}),["c","cc","cpp","cxx","cyc","m"]);c(i({keywords:"null,true,false"}),["json"]);c(i({keywords:R,hashComments:true,cStyleComments:true,verbatimStrings:true,types:e}),["cs"]);c(i({keywords:x,cStyleComments:true}),["java"]);c(i({keywords:H,hashComments:true,multiLineStrings:true}),["bsh","csh","sh"]);c(i({keywords:I,hashComments:true,multiLineStrings:true,tripleQuotedStrings:true}),["cv","py"]);c(i({keywords:s,hashComments:true,multiLineStrings:true,regexLiterals:true}),["perl","pl","pm"]);c(i({keywords:f,hashComments:true,multiLineStrings:true,regexLiterals:true}),["rb"]);c(i({keywords:w,cStyleComments:true,regexLiterals:true}),["js"]);c(i({keywords:r,hashComments:3,cStyleComments:true,multilineStrings:true,tripleQuotedStrings:true,regexLiterals:true}),["coffee"]);c(g([],[[C,/^[\s\S]+/]]),["regex"]);function d(V){var U=V.langExtension;try{var S=a(V.sourceNode);var T=S.sourceCode;V.sourceCode=T;V.spans=S.spans;V.basePos=0;q(U,T)(V);D(V)}catch(W){if("console" in window){console.log(W&&W.stack?W.stack:W)}}}function y(W,V,U){var S=document.createElement("PRE");S.innerHTML=W;if(U){Q(S,U)}var T={langExtension:V,numberLines:U,sourceNode:S};d(T);return S.innerHTML}function b(ad){function Y(af){return document.getElementsByTagName(af)}var ac=[Y("pre"),Y("code"),Y("xmp")];var T=[];for(var aa=0;aa=0){var ah=ai.match(ab);var 
am;if(!ah&&(am=o(aj))&&"CODE"===am.tagName){ah=am.className.match(ab)}if(ah){ah=ah[1]}var al=false;for(var ak=aj.parentNode;ak;ak=ak.parentNode){if((ak.tagName==="pre"||ak.tagName==="code"||ak.tagName==="xmp")&&ak.className&&ak.className.indexOf("prettyprint")>=0){al=true;break}}if(!al){var af=aj.className.match(/\blinenums\b(?::(\d+))?/);af=af?af[1]&&af[1].length?+af[1]:true:false;if(af){Q(aj,af)}S={langExtension:ah,sourceNode:aj,numberLines:af};d(S)}}}if(X]*(?:>|$)/],[PR.PR_COMMENT,/^<\!--[\s\S]*?(?:-\->|$)/],[PR.PR_PUNCTUATION,/^(?:<[%?]|[%?]>)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],["lang-",/^]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-handlebars",/^]*type\s*=\s*['"]?text\/x-handlebars-template['"]?\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-js",/^]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i],[PR.PR_DECLARATION,/^{{[#^>/]?\s*[\w.][^}]*}}/],[PR.PR_DECLARATION,/^{{&?\s*[\w.][^}]*}}/],[PR.PR_DECLARATION,/^{{{>?\s*[\w.][^}]*}}}/],[PR.PR_COMMENT,/^{{![^}]*}}/]]),["handlebars","hbs"]);PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[ \t\r\n\f]+/,null," \t\r\n\f"]],[[PR.PR_STRING,/^\"(?:[^\n\r\f\\\"]|\\(?:\r\n?|\n|\f)|\\[\s\S])*\"/,null],[PR.PR_STRING,/^\'(?:[^\n\r\f\\\']|\\(?:\r\n?|\n|\f)|\\[\s\S])*\'/,null],["lang-css-str",/^url\(([^\)\"\']*)\)/i],[PR.PR_KEYWORD,/^(?:url|rgb|\!important|@import|@page|@media|@charset|inherit)(?=[^\-\w]|$)/i,null],["lang-css-kw",/^(-?(?:[_a-z]|(?:\\[0-9a-f]+ ?))(?:[_a-z0-9\-]|\\(?:\\[0-9a-f]+ ?))*)\s*:/i],[PR.PR_COMMENT,/^\/\*[^*]*\*+(?:[^\/*][^*]*\*+)*\//],[PR.PR_COMMENT,/^(?:)/],[PR.PR_LITERAL,/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],[PR.PR_LITERAL,/^#(?:[0-9a-f]{3}){1,2}/i],[PR.PR_PLAIN,/^-?(?:[_a-z]|(?:\\[\da-f]+ ?))(?:[_a-z\d\-]|\\(?:\\[\da-f]+ ?))*/i],[PR.PR_PUNCTUATION,/^[^\s\w\'\"]+/]]),["css"]);PR.registerLangHandler(PR.createSimpleLexer([],[[PR.PR_KEYWORD,/^-?(?:[_a-z]|(?:\\[\da-f]+ 
?))(?:[_a-z\d\-]|\\(?:\\[\da-f]+ ?))*/i]]),["css-kw"]);PR.registerLangHandler(PR.createSimpleLexer([],[[PR.PR_STRING,/^[^\)\"\']+/]]),["css-str"]); ================================================ FILE: coverage/sorter.js ================================================ /* eslint-disable */ var addSorting = (function() { 'use strict'; var cols, currentSort = { index: 0, desc: false }; // returns the summary table element function getTable() { return document.querySelector('.coverage-summary'); } // returns the thead element of the summary table function getTableHeader() { return getTable().querySelector('thead tr'); } // returns the tbody element of the summary table function getTableBody() { return getTable().querySelector('tbody'); } // returns the th element for nth column function getNthColumn(n) { return getTableHeader().querySelectorAll('th')[n]; } function onFilterInput() { const searchValue = document.getElementById('fileSearch').value; const rows = document.getElementsByTagName('tbody')[0].children; // Try to create a RegExp from the searchValue. If it fails (invalid regex), // it will be treated as a plain text search let searchRegex; try { searchRegex = new RegExp(searchValue, 'i'); // 'i' for case-insensitive } catch (error) { searchRegex = null; } for (let i = 0; i < rows.length; i++) { const row = rows[i]; let isMatch = false; if (searchRegex) { // If a valid regex was created, use it for matching isMatch = searchRegex.test(row.textContent); } else { // Otherwise, fall back to the original plain text search isMatch = row.textContent .toLowerCase() .includes(searchValue.toLowerCase()); } row.style.display = isMatch ? 
'' : 'none'; } } // loads the search box function addSearchBox() { var template = document.getElementById('filterTemplate'); var templateClone = template.content.cloneNode(true); templateClone.getElementById('fileSearch').oninput = onFilterInput; template.parentElement.appendChild(templateClone); } // loads all columns function loadColumns() { var colNodes = getTableHeader().querySelectorAll('th'), colNode, cols = [], col, i; for (i = 0; i < colNodes.length; i += 1) { colNode = colNodes[i]; col = { key: colNode.getAttribute('data-col'), sortable: !colNode.getAttribute('data-nosort'), type: colNode.getAttribute('data-type') || 'string' }; cols.push(col); if (col.sortable) { col.defaultDescSort = col.type === 'number'; colNode.innerHTML = colNode.innerHTML + ''; } } return cols; } // attaches a data attribute to every tr element with an object // of data values keyed by column name function loadRowData(tableRow) { var tableCols = tableRow.querySelectorAll('td'), colNode, col, data = {}, i, val; for (i = 0; i < tableCols.length; i += 1) { colNode = tableCols[i]; col = cols[i]; val = colNode.getAttribute('data-value'); if (col.type === 'number') { val = Number(val); } data[col.key] = val; } return data; } // loads all row data function loadData() { var rows = getTableBody().querySelectorAll('tr'), i; for (i = 0; i < rows.length; i += 1) { rows[i].data = loadRowData(rows[i]); } } // sorts the table using the data for the ith column function sortByIndex(index, desc) { var key = cols[index].key, sorter = function(a, b) { a = a.data[key]; b = b.data[key]; return a < b ? -1 : a > b ? 
1 : 0; }, finalSorter = sorter, tableBody = document.querySelector('.coverage-summary tbody'), rowNodes = tableBody.querySelectorAll('tr'), rows = [], i; if (desc) { finalSorter = function(a, b) { return -1 * sorter(a, b); }; } for (i = 0; i < rowNodes.length; i += 1) { rows.push(rowNodes[i]); tableBody.removeChild(rowNodes[i]); } rows.sort(finalSorter); for (i = 0; i < rows.length; i += 1) { tableBody.appendChild(rows[i]); } } // removes sort indicators for current column being sorted function removeSortIndicators() { var col = getNthColumn(currentSort.index), cls = col.className; cls = cls.replace(/ sorted$/, '').replace(/ sorted-desc$/, ''); col.className = cls; } // adds sort indicators for current column being sorted function addSortIndicators() { getNthColumn(currentSort.index).className += currentSort.desc ? ' sorted-desc' : ' sorted'; } // adds event listeners for all sorter widgets function enableUI() { var i, el, ithSorter = function ithSorter(i) { var col = cols[i]; return function() { var desc = col.defaultDescSort; if (currentSort.index === i) { desc = !currentSort.desc; } sortByIndex(i, desc); removeSortIndicators(); currentSort.index = i; currentSort.desc = desc; addSortIndicators(); }; }; for (i = 0; i < cols.length; i += 1) { if (cols[i].sortable) { // add the click event handler on the th so users // dont have to click on those tiny arrows el = getNthColumn(i).querySelector('.sorter').parentElement; if (el.addEventListener) { el.addEventListener('click', ithSorter(i)); } else { el.attachEvent('onclick', ithSorter(i)); } } } } // adds sorting functionality to the UI return function() { if (!getTable()) { return; } cols = loadColumns(); loadData(); addSearchBox(); addSortIndicators(); enableUI(); }; })(); window.addEventListener('load', addSorting); ================================================ FILE: coverage/src/cli.ts.html ================================================ Code coverage report for src/cli.ts

All files / src cli.ts

0% Statements 0/39
0% Branches 0/1
0% Functions 0/1
0% Lines 0/39

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40                                                                               
#!/usr/bin/env node
import { Command } from "commander";
import { init } from "./commands/init";
import { syncOne } from "./commands/sync-one";
import { sync } from "./commands/sync";
import { syncForever } from "./commands/sync-forever";
import { visualize } from "./commands/visualize";

// CLI wiring: one Command instance, one subcommand per exported action.
const cli = new Command()
  .name("repomirror")
  .description("Sync and transform repositories using AI agents")
  .version("0.1.0");

// Scaffold .repomirror/ interactively.
cli
  .command("init")
  .description("Initialize repomirror in current directory")
  .action(init);

// Single sync pass.
cli.command("sync").description("Run one sync iteration").action(sync);

// Alias kept for discoverability.
cli
  .command("sync-one")
  .description("Run one sync iteration (alias for sync)")
  .action(syncOne);

// Loop sync until interrupted.
cli
  .command("sync-forever")
  .description("Run sync continuously")
  .action(syncForever);

// Pretty-print the claude stream-json output.
cli
  .command("visualize")
  .description("Visualize Claude output stream")
  .option("--debug", "Show debug timestamps")
  .action((options) => visualize(options));

cli.parse();
 
================================================ FILE: coverage/src/commands/index.html ================================================ Code coverage report for src/commands

All files src/commands

0% Statements 0/755
0% Branches 0/5
0% Functions 0/5
0% Lines 0/755

Press n or j to go to the next uncovered block, b, p or k for the previous block.

File Statements Branches Functions Lines
init.ts
0% 0/255 0% 0/1 0% 0/1 0% 0/255
sync-forever.ts
0% 0/41 0% 0/1 0% 0/1 0% 0/41
sync-one.ts
0% 0/6 0% 0/1 0% 0/1 0% 0/6
sync.ts
0% 0/38 0% 0/1 0% 0/1 0% 0/38
visualize.ts
0% 0/415 0% 0/1 0% 0/1 0% 0/415
================================================ FILE: coverage/src/commands/init.ts.html ================================================ Code coverage report for src/commands/init.ts

All files / src/commands init.ts

0% Statements 0/255
0% Branches 0/1
0% Functions 0/1
0% Lines 0/255

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                               
import { promises as fs } from "fs";
import { join, basename } from "path";
import inquirer from "inquirer";
import chalk from "chalk";
import ora from "ora";
import { query } from "@anthropic-ai/claude-code";
import { execa } from "execa";

// Answers collected from the interactive `init` prompts.
interface InitOptions {
  sourceRepo: string; // path of the repo to transform (default "./")
  targetRepo: string; // path the transformed copy is written to
  transformationInstructions: string; // free-form description of the desired transformation
}

/**
 * Interactive entry point for `repomirror init`.
 *
 * Prompts for source/target repos and transformation instructions, runs the
 * preflight checks, asks the Claude SDK for an optimized transformation
 * prompt, and writes the .repomirror/ scaffold. Exits the process with
 * status 1 if prompt generation fails.
 */
export async function init(): Promise<void> {
  console.log(
    chalk.cyan("I'll help you maintain a transformed copy of this repo:\n"),
  );

  // Default target: a sibling directory named after the current repo.
  const defaultTarget = `../${basename(process.cwd())}-transformed`;

  const { sourceRepo, targetRepo, transformationInstructions } =
    await inquirer.prompt<InitOptions>([
      {
        type: "input",
        name: "sourceRepo",
        message: "Source Repo you want to transform:",
        default: "./",
      },
      {
        type: "input",
        name: "targetRepo",
        message: "Where do you want to transform code to:",
        default: defaultTarget,
      },
      {
        type: "input",
        name: "transformationInstructions",
        message: "What changes do you want to make:",
        default: "translate this python repo to typescript",
      },
    ]);

  // Bail out early (process.exit) if the environment is not usable.
  await performPreflightChecks(targetRepo);

  const spinner = ora("Generating transformation prompt...").start();

  try {
    const optimizedPrompt = await generateTransformationPrompt(
      sourceRepo,
      targetRepo,
      transformationInstructions,
    );

    spinner.succeed("Generated transformation prompt");

    // Write .repomirror/ (prompt.md, sync.sh, ralph.sh, .gitignore).
    await createRepoMirrorFiles(sourceRepo, targetRepo, optimizedPrompt);

    console.log(chalk.green("\n✅ repomirror initialized successfully!"));
    console.log(chalk.cyan("\nNext steps:"));
    console.log(
      chalk.white(
        "• Run `npx repomirror sync` - this will run the sync.sh script once",
      ),
    );
    console.log(
      chalk.white(
        "• Run `npx repomirror sync-forever` - this will run the ralph.sh script, working forever to implement all the changes",
      ),
    );
  } catch (error) {
    spinner.fail("Failed to generate transformation prompt");
    const reason = error instanceof Error ? error.message : String(error);
    console.error(chalk.red(`Error: ${reason}`));
    process.exit(1);
  }
}

/**
 * Validates the environment before any files are written.
 *
 * Checks, in order: the target directory exists, is a git repository, has at
 * least one remote, and that the `claude` CLI responds. On the first failing
 * check the spinner shows a failure message and the process exits with
 * status 1; on success the spinner reports all checks passed.
 */
async function performPreflightChecks(targetRepo: string): Promise<void> {
  const spinner = ora("Performing preflight checks...").start();

  // Report `message` on the spinner and terminate the process.
  const abort = (message: string): never => {
    spinner.fail(message);
    process.exit(1);
  };

  try {
    // 1. Target directory must exist.
    try {
      await fs.access(targetRepo);
    } catch {
      abort(`Target directory ${targetRepo} does not exist`);
    }

    // 2. Target directory must be a git repository.
    try {
      await execa("git", ["rev-parse", "--git-dir"], { cwd: targetRepo });
    } catch {
      abort(`Target directory ${targetRepo} is not a git repository`);
    }

    // 3. Target must have at least one remote configured (sync pushes to it).
    try {
      const remotes = await execa("git", ["remote", "-v"], {
        cwd: targetRepo,
      });
      if (!remotes.stdout.trim()) {
        abort(`Target directory ${targetRepo} has no git remotes configured`);
      }
    } catch {
      abort(`Failed to check git remotes in ${targetRepo}`);
    }

    // 4. The `claude` CLI must be installed and respond to a trivial prompt.
    try {
      const probe = await execa("claude", ["-p", "say hi"]);
      if (!probe.stdout.toLowerCase().includes("hi")) {
        abort("Claude Code test failed - response doesn't contain 'hi'");
      }
    } catch {
      abort(
        "Claude Code is not properly configured. Please run `claude` to set up your profile",
      );
    }

    spinner.succeed("All preflight checks passed");
  } catch (error) {
    spinner.fail(
      `Preflight check failed: ${error instanceof Error ? error.message : String(error)}`,
    );
    process.exit(1);
  }
}

/**
 * Asks the Claude Code SDK to produce an optimized, repo-specific
 * transformation prompt modeled on two example prompts, then substitutes the
 * real source/target paths for the placeholder tokens.
 *
 * @param sourceRepo - path substituted for the [SOURCE PATH]/[SOURCE_PATH] placeholders.
 * @param targetRepo - path substituted for the [TARGET PATH]/[TARGET_PATH] placeholders.
 * @param transformationInstructions - the user's transformation request, embedded verbatim.
 * @returns the generated prompt text with all placeholders resolved.
 * @throws Error when the SDK stream ends without a successful result message.
 */
async function generateTransformationPrompt(
  sourceRepo: string,
  targetRepo: string,
  transformationInstructions: string,
): Promise<string> {
  const metaPrompt = `your task is to generate an optimized prompt for repo transformation. The prompt should match the format of the examples below.

<example 1>
Your job is to port [SOURCE PATH] monorepo (for react) to [TARGET PATH] (for vue) and maintain the repository.

You have access to the current [SOURCE PATH] repository as well as the [TARGET PATH] repository.

Make a commit and push your changes after every single file edit.

Use the [TARGET_PATH]/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there.

The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing.
</example 1>

<example 2>
Your job is to port browser-use monorepo (Python) to browser-use-ts (Typescript) and maintain the repository.

You have access to the current [SOURCE PATH] repository as well as the target [TARGET_PATH] repository.

Make a commit and push your changes after every single file edit.

Use the [TARGET PATH]/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there.

The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing.
</example 2>

The user's instructions for transformation are:

<user instructions>
${transformationInstructions}
</user instructions>

Your Job:

When you are ready, respond with EXACTLY the prompt matching the example, tailored for following the user's instructions and nothing else.

You should follow the format EXACTLY, filling in information based on what you learn from a CURSORY exploration of the source repo (this directory). Ensure you ONLY use the read tools (Read, Search, Grep, LS, Glob, etc) to explore the repo. You only need enough sense to build a good prompt, so don't use subagents.`;

  // Stream SDK messages; the final non-error "result" message carries the prompt.
  let result = "";
  for await (const message of query({
    prompt: metaPrompt,
  })) {
    if (message.type === "result" && !message.is_error) {
      result = (message as any).result || "";
      break;
    }
  }

  if (!result) {
    throw new Error("Failed to generate transformation prompt");
  }

  // The template mixes space- and underscore-separated placeholder spellings,
  // so accept either form for BOTH tokens (previously [SOURCE_PATH] was never
  // replaced and could leak into the generated prompt).
  return result
    .replace(/\[SOURCE[ _]PATH\]/g, sourceRepo)
    .replace(/\[TARGET[ _]PATH\]/g, targetRepo);
}

/**
 * Writes the .repomirror/ scaffold into the current working directory:
 * prompt.md (the generated transformation prompt), sync.sh (one sync pass
 * through claude), ralph.sh (sync.sh in an endless loop), and a .gitignore
 * for the claude output log.
 *
 * @param sourceRepo - source repo path; currently unused here but kept so the
 *   signature matches the other init helpers (NOTE(review): confirm intent).
 * @param targetRepo - path passed to `claude --add-dir` in the generated script.
 * @param optimizedPrompt - prompt text written verbatim to prompt.md.
 */
async function createRepoMirrorFiles(
  sourceRepo: string,
  targetRepo: string,
  optimizedPrompt: string,
): Promise<void> {
  const repoMirrorDir = join(process.cwd(), ".repomirror");

  // Create .repomirror directory (idempotent).
  await fs.mkdir(repoMirrorDir, { recursive: true });

  // Create prompt.md
  await fs.writeFile(join(repoMirrorDir, "prompt.md"), optimizedPrompt);

  // Create sync.sh: pipe the prompt through claude, tee the stream-json log.
  // targetRepo is double-quoted so paths containing spaces survive bash word
  // splitting (previously an unquoted ${targetRepo} broke the script).
  const syncScript = `#!/bin/bash
cat .repomirror/prompt.md | \\
        claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir "${targetRepo}" | \\
        tee -a .repomirror/claude_output.jsonl | \\
        npx repomirror visualize --debug;`;

  await fs.writeFile(join(repoMirrorDir, "sync.sh"), syncScript, {
    mode: 0o755, // executable
  });

  // Create ralph.sh: run sync.sh forever with a short pause between passes.
  const ralphScript = `#!/bin/bash
while :; do
  ./.repomirror/sync.sh
  echo -e "===SLEEP===\\n===SLEEP===\\n"; echo 'looping';
  sleep 10;
done`;

  await fs.writeFile(join(repoMirrorDir, "ralph.sh"), ralphScript, {
    mode: 0o755,
  });

  // Keep the (potentially large) claude output log out of version control.
  await fs.writeFile(
    join(repoMirrorDir, ".gitignore"),
    "claude_output.jsonl\n",
  );
}
 
================================================ FILE: coverage/src/commands/sync-forever.ts.html ================================================ Code coverage report for src/commands/sync-forever.ts

All files / src/commands sync-forever.ts

0% Statements 0/41
0% Branches 0/1
0% Functions 0/1
0% Lines 0/41

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42                                                                                   
import { execa } from "execa";
import chalk from "chalk";
import { join } from "path";
import { promises as fs } from "fs";

/**
 * Implements `repomirror sync-forever`: runs .repomirror/ralph.sh, which
 * loops sync.sh until interrupted. Exits with status 1 when init has not been
 * run yet or when the script fails for any reason other than Ctrl+C.
 */
export async function syncForever(): Promise<void> {
  const scriptPath = join(process.cwd(), ".repomirror", "ralph.sh");

  // Guard: the scaffold must exist before we can loop it.
  try {
    await fs.access(scriptPath);
  } catch {
    console.error(
      chalk.red(
        "Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.",
      ),
    );
    process.exit(1);
  }

  console.log(chalk.cyan("Running ralph.sh (continuous sync)..."));
  console.log(chalk.yellow("Press Ctrl+C to stop"));

  try {
    await execa("bash", [scriptPath], {
      stdio: "inherit",
      cwd: process.cwd(),
    });
  } catch (error) {
    // SIGINT means the user hit Ctrl+C: report a clean stop, not a failure.
    if (error instanceof Error && (error as any).signal === "SIGINT") {
      console.log(chalk.yellow("\nStopped by user"));
      return;
    }
    const reason = error instanceof Error ? error.message : String(error);
    console.error(chalk.red(`Sync forever failed: ${reason}`));
    process.exit(1);
  }
}
 
================================================ FILE: coverage/src/commands/sync-one.ts.html ================================================ Code coverage report for src/commands/sync-one.ts

All files / src/commands sync-one.ts

0% Statements 0/6
0% Branches 0/1
0% Functions 0/1
0% Lines 0/6

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7             
import { sync } from "./sync";

// sync-one is just an alias for sync
/** `repomirror sync-one`: thin alias that simply delegates to sync(). */
export async function syncOne(): Promise<void> {
  return sync();
}
 
================================================ FILE: coverage/src/commands/sync.ts.html ================================================ Code coverage report for src/commands/sync.ts

All files / src/commands sync.ts

0% Statements 0/38
0% Branches 0/1
0% Functions 0/1
0% Lines 0/38

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39                                                                             
import { execa } from "execa";
import chalk from "chalk";
import { join } from "path";
import { promises as fs } from "fs";

/**
 * Implements `repomirror sync`: runs .repomirror/sync.sh once with inherited
 * stdio. Exits with status 1 when init has not been run yet or when the
 * script fails.
 */
export async function sync(): Promise<void> {
  const scriptPath = join(process.cwd(), ".repomirror", "sync.sh");

  // Guard: the scaffold must exist before we can run it.
  try {
    await fs.access(scriptPath);
  } catch {
    console.error(
      chalk.red(
        "Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.",
      ),
    );
    process.exit(1);
  }

  console.log(chalk.cyan("Running sync.sh..."));

  try {
    await execa("bash", [scriptPath], {
      stdio: "inherit",
      cwd: process.cwd(),
    });
    console.log(chalk.green("Sync completed successfully"));
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    console.error(chalk.red(`Sync failed: ${reason}`));
    process.exit(1);
  }
}
 
================================================ FILE: coverage/src/commands/visualize.ts.html ================================================ Code coverage report for src/commands/visualize.ts

All files / src/commands visualize.ts

0% Statements 0/415
0% Branches 0/1
0% Functions 0/1
0% Lines 0/415

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416                                                                                                                                                                                                                                                                                                                                                                                                                                                             
                                                                                                                                                                                                                                                                                                                                                                                                  
import { createInterface } from "node:readline";

// ANSI escape sequences used to colorize terminal output.
const colors = {
  reset: "\x1b[0m",
  bright: "\x1b[1m",
  dim: "\x1b[2m",
  red: "\x1b[31m",
  green: "\x1b[32m",
  yellow: "\x1b[33m",
  blue: "\x1b[34m",
  magenta: "\x1b[35m",
  cyan: "\x1b[36m",
};

// Maps a stream-event type to the ANSI color it is rendered with; unknown
// types fall back to reset. (A Map is used instead of a plain object so
// prototype keys like "toString" cannot collide with event types.)
function getTypeColor(type: string): string {
  const palette = new Map<string, string>([
    ["system", colors.magenta],
    ["user", colors.blue],
    ["assistant", colors.green],
    ["tool_use", colors.cyan],
    ["tool_result", colors.yellow],
    ["message", colors.dim],
    ["text", colors.reset],
  ]);
  return palette.get(type) ?? colors.reset;
}

// One entry from a TodoWrite tool call payload.
interface Todo {
  status: string; // "completed" | "in_progress" | "pending"; other values render with fallback markers
  content: string; // human-readable task description
  priority?: string; // "high" | "medium" | "low" when present
}

/**
 * Renders a TodoWrite payload as a colorized checklist: one line per todo
 * with status icon, optional priority tag, and an ACTIVE marker for the
 * in-progress item, followed by a summary progress line.
 *
 * @param todos - todo entries from the TodoWrite tool input.
 * @returns the formatted multi-line string (ends without a trailing newline).
 */
function formatTodoList(todos: Todo[]): string {
  let output = `📋 ${colors.bright}${colors.cyan}Todo List Update${colors.reset}\n`;

  const statusColors = {
    completed: colors.dim + colors.green,
    in_progress: colors.bright + colors.yellow,
    pending: colors.reset,
  };

  const statusIcons = {
    completed: "✅",
    in_progress: "🔄",
    pending: "⏸️",
  };

  const priorityColors = {
    high: colors.red,
    medium: colors.yellow,
    low: colors.dim,
  };

  todos.forEach((todo) => {
    const statusColor =
      statusColors[todo.status as keyof typeof statusColors] || colors.reset;
    const statusIcon =
      statusIcons[todo.status as keyof typeof statusIcons] || "❓";
    const checkbox = todo.status === "completed" ? "☑️" : "☐";

    output += `  ${checkbox} ${statusIcon} ${statusColor}${todo.content}${colors.reset}`;

    // Only emit a priority tag when one was provided; `priority` is optional
    // and the previous code rendered a literal "[undefined]" when absent.
    if (todo.priority !== undefined) {
      const priorityColor =
        priorityColors[todo.priority as keyof typeof priorityColors] ||
        colors.reset;
      output += ` ${priorityColor}[${todo.priority}]${colors.reset}`;
    }

    if (todo.status === "in_progress") {
      output += ` ${colors.bright}${colors.yellow}← ACTIVE${colors.reset}`;
    }

    output += "\n";
  });

  // Summary stats.
  const completed = todos.filter((t) => t.status === "completed").length;
  const inProgress = todos.filter((t) => t.status === "in_progress").length;
  const pending = todos.filter((t) => t.status === "pending").length;
  // Guard the percentage against an empty list (previously printed "NaN%").
  const percentDone =
    todos.length > 0 ? Math.round((completed / todos.length) * 100) : 0;

  output += `\n  ${colors.dim}📊 Progress: ${colors.green}${completed} completed${colors.reset}`;
  output += `${colors.dim}, ${colors.yellow}${inProgress} active${colors.reset}`;
  output += `${colors.dim}, ${colors.reset}${pending} pending${colors.reset}`;
  output += `${colors.dim} (${percentDone}% done)${colors.reset}`;

  return output;
}

/**
 * Render one stream-JSON message as a short, colorized, human-readable line
 * (plus optional `⎿`-prefixed detail lines).
 *
 * Handles the message shapes seen in Claude's --output-format=stream-json:
 * - assistant tool calls (tool name + key argument, extra args on a sub-line)
 * - assistant TodoWrite calls (delegated to formatTodoList)
 * - user messages carrying tool_result content (line/char summary + preview)
 * - system/subtype and plain tool_result markers
 * - a trailing usage/output summary line when available
 *
 * @param json - one parsed line of the stream; shape varies by `type`
 * @returns the formatted string (no trailing newline)
 */
function formatConcise(json: any): string {
  const type = json.type || "unknown";
  const typeColor = getTypeColor(type);

  // Default header; overwritten below for tool calls and tool results.
  let output = `⏺ ${typeColor}${type.charAt(0).toUpperCase() + type.slice(1)}${colors.reset}`;

  // Special handling for TodoWrite calls
  if (
    type === "assistant" &&
    json.message?.content?.[0]?.name === "TodoWrite"
  ) {
    const toolInput = json.message.content[0].input;
    if (toolInput?.todos && Array.isArray(toolInput.todos)) {
      return formatTodoList(toolInput.todos);
    }
  }

  // Add context based on type
  if (type === "assistant" && json.message?.content?.[0]?.name) {
    const toolName = json.message.content[0].name;
    const toolInput = json.message.content[0].input;

    // Format tool name with key arguments
    let toolDisplay = `${colors.cyan}${toolName}${colors.reset}`;

    if (toolInput) {
      const keyArgs = [];

      // Extract the most important argument for each tool type.
      // Order matters: first match wins, so file paths beat patterns/commands.
      if (toolInput.file_path) keyArgs.push(toolInput.file_path);
      else if (toolInput.path) keyArgs.push(toolInput.path);
      else if (toolInput.pattern) keyArgs.push(`"${toolInput.pattern}"`);
      else if (toolInput.command) keyArgs.push(toolInput.command);
      else if (toolInput.cmd) keyArgs.push(toolInput.cmd);
      else if (toolInput.query) keyArgs.push(`"${toolInput.query}"`);
      else if (toolInput.description) keyArgs.push(toolInput.description);
      else if (toolInput.prompt)
        // Only add an ellipsis when the prompt was actually truncated.
        keyArgs.push(
          `"${toolInput.prompt.substring(0, 30)}${toolInput.prompt.length > 30 ? "..." : ""}"`,
        );
      else if (toolInput.url) keyArgs.push(toolInput.url);

      if (keyArgs.length > 0) {
        toolDisplay += `(${colors.green}${keyArgs[0]}${colors.reset})`;
      }
    }

    output = `⏺ ${toolDisplay}`;

    // Show additional arguments on next lines for complex tools
    if (toolInput) {
      const additionalArgs = [];

      if (toolName === "Bash" && toolInput.cwd) {
        additionalArgs.push(`cwd: ${toolInput.cwd}`);
      }
      if (toolInput.limit) additionalArgs.push(`limit: ${toolInput.limit}`);
      if (toolInput.offset) additionalArgs.push(`offset: ${toolInput.offset}`);
      if (toolInput.include)
        additionalArgs.push(`include: ${toolInput.include}`);
      if (toolInput.old_string && toolInput.new_string) {
        additionalArgs.push(
          `replace: "${toolInput.old_string.substring(0, 20)}..." → "${toolInput.new_string.substring(0, 20)}..."`,
        );
      }
      if (toolInput.timeout)
        additionalArgs.push(`timeout: ${toolInput.timeout}ms`);

      if (additionalArgs.length > 0) {
        output += `\n  ⎿  ${colors.dim}${additionalArgs.join(", ")}${colors.reset}`;
      }
    }
  } else if (type === "tool_result" && json.name) {
    output += `(${colors.cyan}${json.name}${colors.reset})`;
  } else if (type === "user" && json.message?.content?.[0]) {
    const content = json.message.content[0];
    if (content.type === "tool_result") {
      // Override the type display for tool results
      output = `⏺ ${colors.yellow}Tool Result${colors.reset}`;

      // Show result summary and first 2 lines
      if (content.content) {
        const resultText =
          typeof content.content === "string"
            ? content.content
            : JSON.stringify(content.content);
        const lines = resultText.split("\n");
        const chars = resultText.length;
        output += `\n  ⎿  ${colors.dim}${lines.length} lines, ${chars} chars${colors.reset}`;
        if (content.is_error) {
          output += ` ${colors.red}ERROR${colors.reset}`;
        }

        // Show first 2 lines of content
        if (lines.length > 0 && lines[0].trim()) {
          output += `\n  ⎿  ${colors.reset}${lines[0]}${colors.reset}`;
        }
        if (lines.length > 1 && lines[1].trim()) {
          output += `\n      ${colors.dim}${lines[1]}${colors.reset}`;
        }
      }
    } else if (content.text) {
      const text = content.text.substring(0, 50);
      // Bug fix: gate the ellipsis on the ORIGINAL length, not the slice
      // length — a text of exactly 50 chars is not truncated and should
      // not be shown with "...".
      output += `: ${colors.dim}${text}${content.text.length > 50 ? "..." : ""}${colors.reset}`;
    }
  } else if (type === "system" && json.subtype) {
    output += `(${colors.dim}${json.subtype}${colors.reset})`;
  }

  // Show assistant message content if it exists
  if (type === "assistant" && json.message?.content) {
    const textContent = json.message.content.find(
      (c: any) => c.type === "text",
    );
    if (textContent?.text) {
      const lines = textContent.text.split("\n").slice(0, 3); // Show first 3 lines
      output += `\n  ⎿  ${colors.reset}${lines[0]}${colors.reset}`;
      if (lines.length > 1) {
        output += `\n      ${colors.dim}${lines[1]}${colors.reset}`;
      }
      if (lines.length > 2) {
        output += `\n      ${colors.dim}${lines[2]}${colors.reset}`;
      }
      if (textContent.text.split("\n").length > 3) {
        output += `\n      ${colors.dim}...${colors.reset}`;
      }
    }
  }

  // Add summary line: token usage wins over raw output size, which wins over
  // content/tool counts. Empty when none of these fields are present.
  let summary = "";
  if (json.message?.usage) {
    const usage = json.message.usage;
    summary = `${usage.input_tokens || 0}/${usage.output_tokens || 0} tokens`;
  } else if (json.output && typeof json.output === "string") {
    summary = `${json.output.length} chars output`;
  } else if (json.message?.content?.length) {
    summary = `${json.message.content.length} content items`;
  } else if (json.tools?.length) {
    summary = `${json.tools.length} tools available`;
  }

  if (summary) {
    output += `\n  ⎿  ${colors.dim}${summary}${colors.reset}`;
  }

  return output;
}

/**
 * Print a tool call together with its matched result as one visual unit:
 * the concise call header, a ✅/❌ result line with a line/char summary,
 * up to three preview lines of the result, and a "more lines" marker.
 *
 * @param toolCall - the tool_use content object (currently unused; kept for
 *   call-site compatibility)
 * @param toolCallJson - the full assistant message containing the tool call
 * @param toolResultJson - the full user message containing the tool_result
 * @param callTimestamp - pre-rendered timestamp prefix for the call line
 * @param resultTimestamp - pre-rendered timestamp prefix for the result line
 */
function displayToolCallWithResult(
  toolCall: any,
  toolCallJson: any,
  toolResultJson: any,
  callTimestamp: string,
  resultTimestamp: string,
) {
  // Header: the tool call rendered in concise form.
  let out = `${callTimestamp}${formatConcise(toolCallJson)}\n`;

  const result = toolResultJson.message.content[0];
  const failed = result.is_error;
  const icon = failed ? "❌" : "✅";
  const statusColor = failed ? colors.red : colors.green;

  out += `  ${resultTimestamp}${icon} ${statusColor}Tool Result${colors.reset}`;

  if (result.content) {
    // Result content may be a string or structured data; stringify the latter.
    const text =
      typeof result.content === "string"
        ? result.content
        : JSON.stringify(result.content);
    const resultLines = text.split("\n");

    out += ` ${colors.dim}(${resultLines.length} lines, ${text.length} chars)${colors.reset}`;
    if (failed) {
      out += ` ${colors.red}ERROR${colors.reset}`;
    }

    // Preview up to the first three non-blank lines; the first in normal
    // color, the rest dimmed.
    const previewCount = Math.min(3, resultLines.length);
    for (let i = 0; i < previewCount; i++) {
      if (resultLines[i].trim()) {
        const lineColor = i === 0 ? colors.reset : colors.dim;
        out += `\n    ⎿  ${lineColor}${resultLines[i]}${colors.reset}`;
      }
    }

    if (resultLines.length > previewCount) {
      out += `\n    ⎿  ${colors.dim}... ${resultLines.length - previewCount} more lines${colors.reset}`;
    }
  }

  // Single write keeps the whole unit contiguous in the output stream.
  process.stdout.write(`${out}\n\n`);
}

/**
 * Read Claude stream-JSON lines from stdin and render them as a concise,
 * colorized activity feed.
 *
 * Tool calls and their results arrive as separate messages; this function
 * pairs them by tool id using two maps (calls waiting for results, and
 * results that arrived before their call) so each pair is displayed as one
 * unit. The final `result` message and a trailing plain assistant message
 * are echoed in full.
 *
 * @param options.debug - when true (or `--debug` is on argv), prefix every
 *   line with an ISO timestamp
 * NOTE(review): the returned promise resolves as soon as the listeners are
 * registered, not when stdin closes — confirm callers don't await completion.
 */
export async function visualize(
  options: { debug?: boolean } = {},
): Promise<void> {
  const rl = createInterface({
    input: process.stdin,
    crlfDelay: Infinity,
  });

  const debugMode = options.debug || process.argv.includes("--debug");
  const toolCalls = new Map(); // Store tool calls by their ID
  const pendingResults = new Map(); // Store results waiting for their tool calls
  let lastLine: any = null; // Track the last line to detect final message
  let isLastAssistantMessage = false;

  rl.on("line", (line) => {
    if (line.trim()) {
      // Timestamp prefix is computed per line so paired call/result output
      // shows when each half actually arrived.
      const timestamp = debugMode
        ? `${colors.dim}[${new Date().toISOString()}]${colors.reset} `
        : "";

      try {
        const json = JSON.parse(line);

        // Check if this is a tool call (assistant content carrying an id)
        if (json.type === "assistant" && json.message?.content?.[0]?.id) {
          const toolCall = json.message.content[0];
          const toolId = toolCall.id;

          // Store the tool call
          toolCalls.set(toolId, {
            toolCall: json,
            timestamp: timestamp,
          });

          // Check if we have a pending result for this tool call
          // (the result message can arrive before the call on the stream)
          if (pendingResults.has(toolId)) {
            const result = pendingResults.get(toolId);
            displayToolCallWithResult(
              toolCall,
              json,
              result.toolResult,
              result.timestamp,
              timestamp,
            );
            pendingResults.delete(toolId);
          } else {
            // Display the tool call and mark it as pending
            process.stdout.write(`${timestamp + formatConcise(json)}\n`);
            process.stdout.write(
              `${colors.dim}  ⎿  Waiting for result...${colors.reset}\n\n`,
            );
          }
        }
        // Check if this is a tool result
        else if (
          json.type === "user" &&
          json.message?.content?.[0]?.type === "tool_result"
        ) {
          const toolResult = json.message.content[0];
          const toolId = toolResult.tool_use_id;

          if (toolCalls.has(toolId)) {
            // We have the matching tool call, display them together
            const stored = toolCalls.get(toolId);
            displayToolCallWithResult(
              stored.toolCall.message.content[0],
              stored.toolCall,
              json,
              stored.timestamp,
              timestamp,
            );
            toolCalls.delete(toolId);
          } else {
            // Store the result and wait for the tool call
            pendingResults.set(toolId, {
              toolResult: json,
              timestamp: timestamp,
            });
          }
        }
        // Check if this is the result message and display full content
        else if (json.type === "result" && json.result) {
          process.stdout.write(`${timestamp + formatConcise(json)}\n\n`);
          process.stdout.write(
            `${colors.bright}${colors.green}=== Final Result ===${colors.reset}\n\n`,
          );
          process.stdout.write(`${json.result}\n`);
        }
        // For all other message types, display normally
        else {
          process.stdout.write(`${timestamp + formatConcise(json)}\n\n`);
        }

        // Track if this might be the last assistant message
        // (a plain text message, i.e. assistant content with no tool id)
        lastLine = json;
        isLastAssistantMessage =
          json.type === "assistant" && !json.message?.content?.[0]?.id;
      } catch (_error) {
        // Not valid JSON — show a parse-error marker with a 50-char preview
        // of the offending line rather than crashing the stream.
        process.stdout.write(
          `${timestamp}${colors.red}⏺ Parse Error${colors.reset}\n`,
        );
        process.stdout.write(
          `  ⎿  ${colors.dim}${line.substring(0, 50)}...${colors.reset}\n\n`,
        );
      }
    }
  });

  rl.on("close", () => {
    // If the last message was an assistant message (not a tool call), display the full content
    if (isLastAssistantMessage && lastLine?.message?.content?.[0]?.text) {
      process.stdout.write(
        `\n${colors.bright}${colors.green}=== Final Assistant Message ===${colors.reset}\n\n`,
      );
      process.stdout.write(`${lastLine.message.content[0].text}\n`);
    }
  });
}
 
================================================ FILE: coverage/src/index.html ================================================ Code coverage report for src

All files src

0% Statements 0/39
0% Branches 0/1
0% Functions 0/1
0% Lines 0/39

Press n or j to go to the next uncovered block, b, p or k for the previous block.

File Statements Branches Functions Lines
cli.ts
0% 0/39 0% 0/1 0% 0/1 0% 0/39
================================================ FILE: docs/remote-repo-design.md ================================================ # Remote Repository Support Design ## Overview This document outlines the design for adding remote repository push/pull support to the repomirror tool. The feature will enable users to: 1. Configure remote repositories for the transformed code 2. Push transformed changes to remote repositories 3. Pull updates from source repositories and re-sync transformations 4. Manage multiple remote destinations for different branches/environments ## Current Architecture Analysis The repomirror tool currently has these components: - **CLI Entry Point**: `/src/cli.ts` - Commander.js based CLI with commands - **Commands**: `/src/commands/` directory with individual command implementations - **Configuration**: `repomirror.yaml` file for persistent configuration - **Core Components**: - `init` - Interactive setup with preflight checks - `sync` - Single transformation run using `.repomirror/sync.sh` - `sync-forever` - Continuous sync using `.repomirror/ralph.sh` - `visualize` - Stream JSON output visualization ### Current Flow 1. `init` creates `.repomirror/` directory with scripts and config 2. `sync` runs Claude transformations via shell scripts 3. Transformed code is written to target directory 4. Target directory must be a git repo with remotes (preflight check) ## Proposed New Commands ### 1. `remote` Command **Purpose**: Manage remote repository configurations ```typescript // Usage examples: // npx repomirror remote add origin https://github.com/user/transformed-repo.git // npx repomirror remote add staging https://github.com/user/staging-repo.git // npx repomirror remote list // npx repomirror remote remove origin ``` **Implementation**: New file `/src/commands/remote.ts` ### 2. 
`push` Command **Purpose**: Push transformed changes to configured remotes ```typescript // Usage examples: // npx repomirror push # push to default remote (origin/main) // npx repomirror push --remote staging # push to specific remote // npx repomirror push --branch feature/new-feature # push to specific branch // npx repomirror push --all # push to all configured remotes ``` **Implementation**: New file `/src/commands/push.ts` ### 3. `pull` Command **Purpose**: Pull source changes and trigger re-sync ```typescript // Usage examples: // npx repomirror pull # pull source changes and re-sync // npx repomirror pull --source-only # pull source without re-sync // npx repomirror pull --sync-after # pull and run sync-forever after ``` **Implementation**: New file `/src/commands/pull.ts` ## Configuration Changes ### Enhanced `repomirror.yaml` ```yaml # Current fields sourceRepo: "./src" targetRepo: "../myproject-ts" transformationInstructions: "convert python to typescript" # New remote repository configuration remotes: origin: url: "https://github.com/user/myproject-ts.git" branch: "main" auto_push: true # auto-push after sync staging: url: "https://github.com/user/myproject-staging.git" branch: "develop" auto_push: false # New options push: default_remote: "origin" # default for push command default_branch: "main" # default branch to push to commit_prefix: "[repomirror]" # prefix for commit messages pull: auto_sync: true # automatically run sync after pull source_remote: "upstream" # remote name in source repo to pull from source_branch: "main" # branch to pull from in source repo ``` ### Configuration Schema Updates The `RepoMirrorConfig` interface in `/src/commands/init.ts` needs expansion: ```typescript interface RepoMirrorConfig { // Existing fields sourceRepo: string; targetRepo: string; transformationInstructions: string; // New remote configuration remotes?: { [remoteName: string]: { url: string; branch: string; auto_push?: boolean; }; }; // New push 
configuration push?: { default_remote?: string; default_branch?: string; commit_prefix?: string; }; // New pull configuration pull?: { auto_sync?: boolean; source_remote?: string; source_branch?: string; }; } ``` ## Integration with Existing Commands ### Enhanced `init` Command - Add remote configuration during setup - Prompt for remote repository URLs - Validate remote accessibility during preflight checks - Update existing preflight checks to verify push permissions ### Enhanced `sync` Command - Add `--push` flag to auto-push after sync - Add `--remote ` flag to specify push destination - Modify generated scripts to optionally include git operations ### Enhanced `sync-forever` Command - Add configuration option for auto-push after each sync - Add failure handling for git operations - Continue syncing even if push fails (with warnings) ## Git Operations Design ### Push Workflow 1. Check if target directory has uncommitted changes 2. Create commit with descriptive message (include source commit hash if available) 3. Push to specified remote/branch 4. Handle authentication failures gracefully 5. Support both HTTPS and SSH authentication ### Pull Workflow 1. Navigate to source repository 2. Pull latest changes from specified remote/branch 3. Check if changes affect files that impact transformation 4. Optionally trigger re-sync based on configuration 5. 
Handle merge conflicts in source repository ### Commit Message Strategy Format: `[repomirror] (source: )` Examples: - `[repomirror] Update API transformations (source: abc123f)` - `[repomirror] Convert authentication module to TypeScript (source: def456a)` ## Error Handling Considerations ### Authentication Failures - Detect SSH key issues vs HTTPS credential issues - Provide helpful error messages with setup instructions - Support multiple authentication methods - Graceful fallback when push fails ### Network Issues - Retry logic with exponential backoff - Offline mode detection - Queue operations for later when connectivity returns ### Git State Issues - Handle dirty working directory in target repo - Resolve merge conflicts in source repo pulls - Handle detached HEAD states - Branch switching and creation ### Sync Integration Errors - Continue sync-forever even if push fails - Log failures without stopping the sync process - Provide option to disable auto-push on repeated failures ## Implementation Plan ### Phase 1: Core Remote Management 1. Create `remote` command for adding/listing/removing remotes 2. Update configuration schema and init command 3. Add configuration validation and loading functions ### Phase 2: Push Functionality 1. Create `push` command with basic push operations 2. Add commit message generation 3. Integrate with sync commands (optional auto-push) 4. Add authentication and error handling ### Phase 3: Pull Functionality 1. Create `pull` command for source repository updates 2. Add change detection and sync triggering 3. Integrate with sync-forever workflow 4. Add conflict resolution guidance ### Phase 4: Enhanced Integration 1. Enhance sync scripts with git operations 2. Add branch management features 3. Add multi-remote push support 4. 
Performance optimizations and caching ## File Structure Changes ``` src/commands/ ├── init.ts # Enhanced with remote config ├── sync.ts # Enhanced with push options ├── sync-forever.ts # Enhanced with auto-push ├── sync-one.ts # (unchanged) ├── visualize.ts # (unchanged) ├── remote.ts # NEW: Remote management ├── push.ts # NEW: Push operations └── pull.ts # NEW: Pull operations src/lib/ # NEW: Shared utilities ├── git.ts # Git operation helpers ├── config.ts # Configuration management └── auth.ts # Authentication helpers ``` ## Testing Strategy ### Unit Tests - Mock git operations using `execa` mocks - Test configuration validation and loading - Test error handling scenarios - Test command parsing and validation ### Integration Tests - Test with real git repositories (using temp directories) - Test authentication flows - Test sync integration with git operations - Test error recovery scenarios ### End-to-End Tests - Full workflow tests with mock remote repositories - Test interaction between commands - Test configuration persistence - Test sync-forever with git operations ## Security Considerations ### Credential Management - Never store credentials in configuration files - Use git credential helpers - Support SSH key authentication - Provide clear documentation for authentication setup ### Repository Access - Validate remote URLs before adding - Check push permissions during setup - Handle private repository access - Support organization/team repository patterns ## CLI Updates ### New Command Structure ```bash # Remote management repomirror remote add [--branch ] repomirror remote list repomirror remote remove repomirror remote set-url # Push operations repomirror push [--remote ] [--branch ] [--all] repomirror push --dry-run # show what would be pushed # Pull operations repomirror pull [--source-only] [--sync-after] repomirror pull --check # check for source changes without pulling # Enhanced existing commands repomirror sync --push [--remote ] repomirror 
sync-forever --auto-push repomirror init --remote # add remote during init ``` ### Help Text Updates Update CLI help text and `--help` output to document new remote repository features and workflow examples. ## Migration Strategy ### Backward Compatibility - All new features are optional - Existing workflows continue unchanged - Configuration files are upgraded automatically - Graceful degradation when remotes not configured ### Upgrade Path 1. Existing users can add remotes via `repomirror remote add` 2. Configuration file is automatically migrated on first use 3. New features are opt-in via flags or configuration 4. Clear documentation for migrating existing setups ## Success Metrics ### Functionality Metrics - Commands execute without errors in common scenarios - Git operations handle authentication correctly - Sync integration works smoothly with push operations - Error messages are helpful and actionable ### Performance Metrics - Push operations complete in reasonable time - Sync-forever remains responsive with auto-push enabled - Pull operations detect changes efficiently - No significant performance regression in existing commands This design provides a comprehensive foundation for adding remote repository support while maintaining the existing architecture and user experience. The phased implementation allows for iterative development and testing of each component. 
================================================ FILE: hack/ralph-validate.sh ================================================ #!/bin/bash echo "Testing repomirror init command validation" echo "==========================================" # Create temp directories SOURCE_DIR=$(mktemp -d) TARGET_DIR=$(mktemp -d) echo "Source dir: $SOURCE_DIR" echo "Target dir: $TARGET_DIR" # Setup source repo with hello.ts echo 'console.log("Hello World");' > "$SOURCE_DIR/hello.ts" echo "Created hello.ts in source directory" # Setup target repo as git repo with remote cd "$TARGET_DIR" git init git remote add origin https://github.com/example/test.git cd - > /dev/null echo "" echo "Running repomirror init..." echo "" # Run repomirror init from the source directory cd "$SOURCE_DIR" SKIP_CLAUDE_TEST=true timeout 30s node /Users/dex/go/src/github.com/dexhorthy/repomirror/dist/cli.js init \ --source "$SOURCE_DIR" \ --target "$TARGET_DIR" \ --instructions "translate this typescript repo to python" EXIT_CODE=$? 
if [ $EXIT_CODE -eq 124 ]; then echo "" echo "❌ Command timed out after 30 seconds" echo "Issue: The Claude SDK call in generateTransformationPrompt is hanging" echo "" echo "The problem is in src/commands/init.ts lines 319-332:" echo "- The async iterator is not properly handling all message types" echo "- Need to add timeout or better error handling" exit 1 elif [ $EXIT_CODE -eq 0 ]; then echo "" echo "✅ Init command completed successfully" # Check if required files were created if [ -f "$SOURCE_DIR/repomirror.yaml" ]; then echo "✅ repomirror.yaml created" else echo "❌ repomirror.yaml not created" fi if [ -d "$SOURCE_DIR/.repomirror" ]; then echo "✅ .repomirror directory created" # Check individual files for file in prompt.md sync.sh ralph.sh .gitignore; do if [ -f "$SOURCE_DIR/.repomirror/$file" ]; then echo " ✅ $file created" else echo " ❌ $file not created" fi done else echo "❌ .repomirror directory not created" fi else echo "" echo "❌ Command failed with exit code $EXIT_CODE" fi # Cleanup rm -rf "$SOURCE_DIR" "$TARGET_DIR" ================================================ FILE: hack/ralph.sh ================================================ while :; do cat prompt.md | \ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions | \ tee -a claude_output.jsonl | \ bun hack/visualize.ts --debug; echo -e "===SLEEP===\n===SLEEP===\n"; say 'looping'; sleep 10; done ================================================ FILE: hack/visualize.ts ================================================ #!/usr/bin/env bun import { createInterface } from 'node:readline'; const colors = { reset: '\x1b[0m', bright: '\x1b[1m', dim: '\x1b[2m', red: '\x1b[31m', green: '\x1b[32m', yellow: '\x1b[33m', blue: '\x1b[34m', magenta: '\x1b[35m', cyan: '\x1b[36m', }; function getTypeColor(type: string): string { switch (type) { case 'system': return colors.magenta; case 'user': return colors.blue; case 'assistant': return colors.green; case 'tool_use': return colors.cyan; case 
'tool_result': return colors.yellow; case 'message': return colors.dim; case 'text': return colors.reset; default: return colors.reset; } } function _formatHeader(json: any, lineNumber: number): string { const type = json.type || 'unknown'; const typeColor = getTypeColor(type); let header = `${colors.dim}--- Line ${lineNumber} ${typeColor}[${type.toUpperCase()}]${colors.reset}`; // Add context based on type if (json.message?.role) { header += ` ${colors.dim}(${json.message.role})${colors.reset}`; } if (json.message?.content?.[0]?.name) { header += ` ${colors.cyan}${json.message.content[0].name}${colors.reset}`; } if (json.name) { header += ` ${colors.cyan}${json.name}${colors.reset}`; } if (json.subtype) { header += ` ${colors.dim}${json.subtype}${colors.reset}`; } return `${header} ${colors.dim}---${colors.reset}`; } function _colorizeJson(obj: any, indent = 0, path: string[] = []): string { const spaces = ' '.repeat(indent); if (obj === null) return `${colors.dim}null${colors.reset}`; if (typeof obj === 'boolean') return `${colors.yellow}${obj}${colors.reset}`; if (typeof obj === 'number') return `${colors.cyan}${obj}${colors.reset}`; if (typeof obj === 'string') { // Truncate very long strings if (obj.length > 200) { return `${colors.green}"${obj.substring(0, 197)}..."${colors.reset}`; } return `${colors.green}"${obj}"${colors.reset}`; } if (Array.isArray(obj)) { if (obj.length === 0) return '[]'; // For content arrays, show summary if (path.includes('content') && obj.length > 3) { const summary = obj.slice(0, 2).map((item) => _colorizeJson(item, indent + 1, [...path])); return `[\n${summary.join(',\n')},\n${spaces} ${colors.dim}... 
${obj.length - 2} more items${colors.reset}\n${spaces}]`; } const items = obj.map((item) => `${spaces} ${_colorizeJson(item, indent + 1, [...path])}`); return `[\n${items.join(',\n')}\n${spaces}]`; } if (typeof obj === 'object') { const keys = Object.keys(obj); if (keys.length === 0) return '{}'; // Show only key fields for deeply nested objects const importantKeys = [ 'type', 'role', 'name', 'id', 'input', 'output', 'content', 'text', 'subtype', 'session_id', ]; const keysToShow = indent > 2 ? keys.filter((k) => importantKeys.includes(k)) : keys; if (keysToShow.length === 0 && keys.length > 0) { return `${colors.dim}{...${keys.length} keys}${colors.reset}`; } const items = keysToShow.map((key) => { let coloredKey = `${colors.blue}"${key}"${colors.reset}`; // Highlight important keys if (['type', 'name', 'role'].includes(key)) { coloredKey = `${colors.bright}${colors.blue}"${key}"${colors.reset}`; } const value = _colorizeJson(obj[key], indent + 1, [...path, key]); return `${spaces} ${coloredKey}: ${value}`; }); if (keysToShow.length < keys.length) { items.push( `${spaces} ${colors.dim}... ${keys.length - keysToShow.length} more keys${colors.reset}` ); } return `{\n${items.join(',\n')}\n${spaces}}`; } return String(obj); } function formatTodoList(todos: any[]): string { let output = `📋 ${colors.bright}${colors.cyan}Todo List Update${colors.reset}\n`; const statusColors = { completed: colors.dim + colors.green, in_progress: colors.bright + colors.yellow, pending: colors.reset, }; const statusIcons = { completed: '✅', in_progress: '🔄', pending: '⏸️', }; const priorityColors = { high: colors.red, medium: colors.yellow, low: colors.dim, }; todos.forEach((todo, index) => { const statusColor = statusColors[todo.status] || colors.reset; const statusIcon = statusIcons[todo.status] || '❓'; const priorityColor = priorityColors[todo.priority] || colors.reset; const checkbox = todo.status === 'completed' ? 
'☑️' : '☐'; output += ` ${checkbox} ${statusIcon} ${statusColor}${todo.content}${colors.reset}`; output += ` ${priorityColor}[${todo.priority}]${colors.reset}`; if (todo.status === 'in_progress') { output += ` ${colors.bright}${colors.yellow}← ACTIVE${colors.reset}`; } output += '\n'; }); // Add summary stats const completed = todos.filter((t) => t.status === 'completed').length; const inProgress = todos.filter((t) => t.status === 'in_progress').length; const pending = todos.filter((t) => t.status === 'pending').length; output += `\n ${colors.dim}📊 Progress: ${colors.green}${completed} completed${colors.reset}`; output += `${colors.dim}, ${colors.yellow}${inProgress} active${colors.reset}`; output += `${colors.dim}, ${colors.reset}${pending} pending${colors.reset}`; output += `${colors.dim} (${Math.round((completed / todos.length) * 100)}% done)${colors.reset}`; return output; } function formatConcise(json: any): string { const type = json.type || 'unknown'; const typeColor = getTypeColor(type); let output = `⏺ ${typeColor}${type.charAt(0).toUpperCase() + type.slice(1)}${colors.reset}`; // Special handling for TodoWrite calls if (type === 'assistant' && json.message?.content?.[0]?.name === 'TodoWrite') { const toolInput = json.message.content[0].input; if (toolInput?.todos && Array.isArray(toolInput.todos)) { return formatTodoList(toolInput.todos); } } // Add context based on type if (type === 'assistant' && json.message?.content?.[0]?.name) { const toolName = json.message.content[0].name; const toolInput = json.message.content[0].input; // Format tool name with key arguments let toolDisplay = `${colors.cyan}${toolName}${colors.reset}`; if (toolInput) { const keyArgs = []; // Extract the most important argument for each tool type if (toolInput.file_path) keyArgs.push(toolInput.file_path); else if (toolInput.path) keyArgs.push(toolInput.path); else if (toolInput.pattern) keyArgs.push(`"${toolInput.pattern}"`); else if (toolInput.command) 
keyArgs.push(toolInput.command); else if (toolInput.cmd) keyArgs.push(toolInput.cmd); else if (toolInput.query) keyArgs.push(`"${toolInput.query}"`); else if (toolInput.description) keyArgs.push(toolInput.description); else if (toolInput.prompt) keyArgs.push(`"${toolInput.prompt.substring(0, 30)}..."`); else if (toolInput.url) keyArgs.push(toolInput.url); if (keyArgs.length > 0) { toolDisplay += `(${colors.green}${keyArgs[0]}${colors.reset})`; } } output = `⏺ ${toolDisplay}`; // Show additional arguments on next lines for complex tools if (toolInput) { const additionalArgs = []; if (toolName === 'Bash' && toolInput.cwd) { additionalArgs.push(`cwd: ${toolInput.cwd}`); } if (toolInput.limit) additionalArgs.push(`limit: ${toolInput.limit}`); if (toolInput.offset) additionalArgs.push(`offset: ${toolInput.offset}`); if (toolInput.include) additionalArgs.push(`include: ${toolInput.include}`); if (toolInput.old_string && toolInput.new_string) { additionalArgs.push( `replace: "${toolInput.old_string.substring(0, 20)}..." → "${toolInput.new_string.substring(0, 20)}..."` ); } if (toolInput.timeout) additionalArgs.push(`timeout: ${toolInput.timeout}ms`); if (additionalArgs.length > 0) { output += `\n ⎿ ${colors.dim}${additionalArgs.join(', ')}${colors.reset}`; } } } else if (type === 'tool_result' && json.name) { output += `(${colors.cyan}${json.name}${colors.reset})`; } else if (type === 'user' && json.message?.content?.[0]) { const content = json.message.content[0]; if (content.type === 'tool_result') { // Override the type display for tool results output = `⏺ ${colors.yellow}Tool Result${colors.reset}`; // Show result summary and first 2 lines if (content.content) { const resultText = typeof content.content === 'string' ? 
content.content : JSON.stringify(content.content); const lines = resultText.split('\n'); const chars = resultText.length; output += `\n ⎿ ${colors.dim}${lines.length} lines, ${chars} chars${colors.reset}`; if (content.is_error) { output += ` ${colors.red}ERROR${colors.reset}`; } // Show first 2 lines of content if (lines.length > 0 && lines[0].trim()) { output += `\n ⎿ ${colors.reset}${lines[0]}${colors.reset}`; } if (lines.length > 1 && lines[1].trim()) { output += `\n ${colors.dim}${lines[1]}${colors.reset}`; } } } else if (content.text) { const text = content.text.substring(0, 50); output += `: ${colors.dim}${text}${text.length === 50 ? '...' : ''}${colors.reset}`; } } else if (type === 'system' && json.subtype) { output += `(${colors.dim}${json.subtype}${colors.reset})`; } // Show assistant message content if it exists if (type === 'assistant' && json.message?.content) { const textContent = json.message.content.find((c) => c.type === 'text'); if (textContent?.text) { const lines = textContent.text.split('\n').slice(0, 3); // Show first 3 lines output += `\n ⎿ ${colors.reset}${lines[0]}${colors.reset}`; if (lines.length > 1) { output += `\n ${colors.dim}${lines[1]}${colors.reset}`; } if (lines.length > 2) { output += `\n ${colors.dim}${lines[2]}${colors.reset}`; } if (textContent.text.split('\n').length > 3) { output += `\n ${colors.dim}...${colors.reset}`; } } } // Add summary line let summary = ''; if (json.message?.usage) { const usage = json.message.usage; summary = `${usage.input_tokens || 0}/${usage.output_tokens || 0} tokens`; } else if (json.output && typeof json.output === 'string') { summary = `${json.output.length} chars output`; } else if (json.message?.content?.length) { summary = `${json.message.content.length} content items`; } else if (json.tools?.length) { summary = `${json.tools.length} tools available`; } if (summary) { output += `\n ⎿ ${colors.dim}${summary}${colors.reset}`; } return output; } async function processStream() { const rl = 
createInterface({
  input: process.stdin,
  crlfDelay: Infinity, // treat \r\n as a single line break
});
const debugMode = process.argv.includes('--debug');
// Tool calls and tool results arrive as separate stream-JSON lines and may
// appear in either order; these two maps pair them up by tool id.
const toolCalls = new Map(); // Store tool calls by their ID
const pendingResults = new Map(); // Store results waiting for their tool calls
let lastLine = null; // Track the last line to detect final message
let isLastAssistantMessage = false;
rl.on('line', (line) => {
  if (line.trim()) {
    // Timestamp prefix is only rendered in --debug mode.
    const timestamp = debugMode ? `${colors.dim}[${new Date().toISOString()}]${colors.reset} ` : '';
    try {
      const json = JSON.parse(line);
      // Check if this is a tool call
      // (an assistant message whose first content block carries a tool-use id)
      if (json.type === 'assistant' && json.message?.content?.[0]?.id) {
        const toolCall = json.message.content[0];
        const toolId = toolCall.id;
        // Store the tool call
        toolCalls.set(toolId, {
          toolCall: json,
          timestamp: timestamp,
        });
        // Check if we have a pending result for this tool call
        if (pendingResults.has(toolId)) {
          // The result arrived before the call: render both together now.
          const result = pendingResults.get(toolId);
          displayToolCallWithResult(
            toolCall,
            json,
            result.toolResult,
            result.timestamp,
            timestamp
          );
          pendingResults.delete(toolId);
        } else {
          // Display the tool call and mark it as pending
          process.stdout.write(`${timestamp + formatConcise(json)}\n`);
          process.stdout.write(`${colors.dim} ⎿ Waiting for result...${colors.reset}\n\n`);
        }
      }
      // Check if this is a tool result
      else if (json.type === 'user' && json.message?.content?.[0]?.type === 'tool_result') {
        const toolResult = json.message.content[0];
        const toolId = toolResult.tool_use_id;
        if (toolCalls.has(toolId)) {
          // We have the matching tool call, display them together
          const stored = toolCalls.get(toolId);
          displayToolCallWithResult(
            stored.toolCall.message.content[0],
            stored.toolCall,
            json,
            stored.timestamp,
            timestamp
          );
          toolCalls.delete(toolId);
        } else {
          // Store the result and wait for the tool call
          pendingResults.set(toolId, {
            toolResult: json,
            timestamp: timestamp,
          });
        }
      }
      // Check if this is the result message and display full content
      else if (json.type === 'result' && json.result) {
process.stdout.write(`${timestamp + formatConcise(json)}\n\n`);
        process.stdout.write(`${colors.bright}${colors.green}=== Final Result ===${colors.reset}\n\n`);
        process.stdout.write(`${json.result}\n`);
      }
      // For all other message types, display normally
      else {
        process.stdout.write(`${timestamp + formatConcise(json)}\n\n`);
      }
      // Track if this might be the last assistant message
      // (a plain-text assistant message, i.e. content[0] has no tool-call id)
      lastLine = json;
      isLastAssistantMessage = json.type === 'assistant' && !json.message?.content?.[0]?.id;
    } catch (_error) {
      // Non-JSON lines are reported (truncated to 50 chars) instead of
      // crashing the stream processor.
      process.stdout.write(`${timestamp}${colors.red}⏺ Parse Error${colors.reset}\n`);
      process.stdout.write(` ⎿ ${colors.dim}${line.substring(0, 50)}...${colors.reset}\n\n`);
    }
  }
});
rl.on('close', () => {
  // If the last message was an assistant message (not a tool call), display the full content
  if (isLastAssistantMessage && lastLine?.message?.content?.[0]?.text) {
    process.stdout.write(`\n${colors.bright}${colors.green}=== Final Assistant Message ===${colors.reset}\n\n`);
    process.stdout.write(`${lastLine.message.content[0].text}\n`);
  }
});
}

// Render a tool call together with its matched tool result.
// toolCall       - content[0] of the assistant message (passed through but not
//                  read here; formatConcise re-derives it from toolCallJson)
// toolCallJson   - the full assistant message containing the tool call
// toolResultJson - the full user message containing the matching tool_result
// callTimestamp / resultTimestamp - debug-mode prefixes captured when each line arrived
function displayToolCallWithResult(
  toolCall: any,
  toolCallJson: any,
  toolResultJson: any,
  callTimestamp: string,
  resultTimestamp: string
) {
  // Display the tool call header
  process.stdout.write(`${callTimestamp}${formatConcise(toolCallJson)}\n`);
  // Display the result
  const toolResult = toolResultJson.message.content[0];
  const isError = toolResult.is_error;
  const resultIcon = isError ? '❌' : '✅';
  const resultColor = isError ? colors.red : colors.green;
  process.stdout.write(
    ` ${resultTimestamp}${resultIcon} ${resultColor}Tool Result${colors.reset}`
  );
  if (toolResult.content) {
    // Content may be a plain string or structured blocks; normalize to text.
    const resultText = typeof toolResult.content === 'string' ?
toolResult.content : JSON.stringify(toolResult.content);
    const lines = resultText.split('\n');
    const chars = resultText.length;
    // Summary of the result payload size.
    process.stdout.write(` ${colors.dim}(${lines.length} lines, ${chars} chars)${colors.reset}`);
    if (isError) {
      process.stdout.write(` ${colors.red}ERROR${colors.reset}`);
    }
    // Show first few lines of result
    const linesToShow = Math.min(3, lines.length);
    for (let i = 0; i < linesToShow; i++) {
      if (lines[i].trim()) {
        // First preview line is full-intensity; the rest are dimmed.
        const lineColor = i === 0 ? colors.reset : colors.dim;
        process.stdout.write(`\n ⎿ ${lineColor}${lines[i]}${colors.reset}`);
      }
    }
    if (lines.length > linesToShow) {
      process.stdout.write(
        `\n ⎿ ${colors.dim}... ${lines.length - linesToShow} more lines${colors.reset}`
      );
    }
  }
  process.stdout.write('\n\n');
}

// Entry-point guard.
// NOTE(review): import.meta.main is a Bun extension (not standard in plain
// Node); under Node this condition is falsy and processStream() never runs —
// confirm the intended runtime (the repo otherwise invokes scripts via tsx).
if (import.meta.main) {
  processStream().catch(console.error);
}
================================================ FILE: package.json ================================================ { "name": "repomirror", "version": "0.3.0", "description": "Sync and transform repositories using AI agents", "main": "dist/index.js", "bin": { "repomirror": "dist/cli.js" }, "files": [ "dist/**/*", "src/templates/**/*" ], "scripts": { "build": "tsc && cp -r src/templates dist/", "dev": "tsx watch src/cli.ts", "test": "vitest", "lint": "eslint src --ext .ts", "fix": "eslint src --ext .ts --fix && prettier --write 'src/**/*.ts'", "check": "tsc --noEmit && npm run lint", "publish": "npm run build && npm publish" }, "keywords": [ "repository", "sync", "ai", "migration" ], "author": "", "license": "MIT", "dependencies": { "@anthropic-ai/claude-code": "^1.0.89", "@types/inquirer": "^9.0.9", "chalk": "^5.3.0", "commander": "^12.0.0", "execa": "^8.0.1", "fs-extra": "^11.2.0", "glob": "^10.3.10", "inquirer": "^12.9.3", "ora": "^8.0.1", "yaml": "^2.4.0" }, "devDependencies": { "@types/fs-extra": "^11.0.4", "@types/node": "^20.11.0", "@typescript-eslint/eslint-plugin": "^7.0.0", "@typescript-eslint/parser": "^7.0.0", "@vitest/coverage-v8": "^1.6.1", "eslint":
"^8.56.0", "prettier": "^3.2.0", "tsx": "^4.7.0", "typescript": "^5.3.0", "vitest": "^1.2.0" } } ================================================ FILE: prompt-validate.md ================================================ 0a. read everything in specs/ 0b. review the files in src/ 1. pick the SINGLE highest priority item from VALIDATION_PLAN.md and test the functionality from the specs 2. if things are not working as expected, refine the specs/ to highlight the example that should work, and update the @IMPLEMENTATION_PLAN.md with updated steps to address the issue 3. update the @VALIDATION_PLAN.md with your progress and commit all changes with git add -A && git commit -m "..." ================================================ FILE: prompt.md ================================================ 0a. read everything in specs/ 0b. review the files in src/ 1. pick the SINGLE highest priority item from @IMPLEMENTATION_PLAN.md and implement it using up to 50 subagents 2. ensure the tests and checks are passing 3. update the @IMPLEMENTATION_PLAN.md with your progress and commit all changes with git add -A && git commit -m "..." if there is a discrepancy in the IMPLEMENTATION_PLAN.md and the spec, always update the IMPLEMENTATION_PLAN.md to match the spec. ================================================ FILE: prompts/ai-sdk-python.md ================================================ Your job is to port ai-sdk monorepo (for typescript) to ai-sdk-python (for python) and maintain the repository. You have access to the current ai-sdk repositorty as well as the ai-sdk-python repository. Make a commit and push your changes after every single file edit. Use the ai-sdk-python/agent/ directory as a scratchpad for your work via .MD files. Store long term plans and todo lists there. Keep track of your current status in a ai-sdk-python/agent/TODO.md file. When done with the initial port, feel free to occasionally check for GitHub issues and answer or resolve them. 
Make sure to let the user know that you are a bot if you answer an issue. Use the gh cli for this. ================================================ FILE: prompts/assistant-ui-vue.md ================================================ Your job is to port assistant-ui-react monorepo (for react) to assistant-ui-vue (for vue) and maintain the repository. You have access to the current assistant-ui-react repository as well as the assistant-ui-vue repository. Make a commit and push your changes after every single file edit. Use the assistant-ui-vue/.agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. ================================================ FILE: prompts/better-use.md ================================================ Your job is to port browser-use monorepo (Python) to browser-use-ts (better-use, Typescript) and maintain the repository. You have access to the current browser-use repositorty as well as the browser-use-ts repository. Make a commit and push your changes after every single file edit. Use the browser-use-ts/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. When done with the initial port, feel free to occasionally check for GitHub issues and answer or resolve them. 
Make sure to let the user know that you are a bot if you answer an issue. Use the gh cli for this. Keep track of your current status in TODO.md in the browser-use-ts/agent/ directory. ================================================ FILE: prompts/open-convex.md ================================================ read convex-11ms-full.txt IN ITS ENTIRETY - read every line, chunking if necessary 0a. familiarize your self with the code in openconvex 0b. Use the openconvex/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. Implement the SINGLE HIGHEST PRIORITY item from openconvex/agent/IMPLEMENTATION_PLAN.md with up to 50 subagents ensure the tests are passing make a commit and push your changes to the repo with git commit and git push thats it, you're done ================================================ FILE: prompts/open-dedalus.md ================================================ Your job is to implement open-dedalus based on the dedalus.llms.txt file. You have access to the current dedalus.llms.txt file as well as the open-dedalus repository. Make a commit and push your changes after every single file edit. Use the open-dedalus/agent/ directory as a scratchpad for your work via .MD files. Store long term plans and todo lists there. pick the single highest priority item from @IMPLEMENTATION_PLAN.md ================================================ FILE: prompts/repomirror.md ================================================ 0a. read everything in specs/ 0b. review the files in src/ 1. pick the SINGLE highest priority item from @IMPLEMENTATION_PLAN.md and implement it using up to 50 subagents 2. ensure the tests and checks are passing 3. update the @IMPLEMENTATION_PLAN.md with your progress and commit all changes with git add -A && git commit -m "..." if there is a discrepancy in the IMPLEMENTATION_PLAN.md and the spec, always update the IMPLEMENTATION_PLAN.md to match the spec. 
================================================ FILE: repomirror.md ================================================ ## We Put a Coding Agent in a While Loop and It Shipped 6 Repos Overnight This weekend at the YC Agents hackathon, we asked ourselves: *what’s the weirdest way we could use a coding agent?* Our answer: run Claude Code headlessly in a loop forever and see what happens. Turns out, what happens is: you wake up to 1,000+ commits, six ported codebases, and a wonky little tool we’re calling [RepoMirror](https://github.com/repomirrorhq/repomirror). ### How We Got Here We recently stumbled upon a technique promoted by [Geoff Huntley](https://ghuntley.com/ralph/), to run a coding agent in a while loop: ``` while :; do cat prompt.md | amp; done ``` One of our team members, Simon, is the creator of assistant-ui, a React library for building AI interfaces in React. He gets a lot of requests to add Vue.js support, and he wondered if the approach would work for porting assistant-ui to Vue.js. ### How It Works Basically what we ended up doing sounds really dumb, but it works surprisingly well - we used Claude Code for the loop: ``` while :; do cat prompt.md | claude -p --dangerously-skip-permissions; done ``` The prompt was simple: ``` Your job is to port assistant-ui-react monorepo (for react) to assistant-ui-vue (for vue) and maintain the repository. You have access to the current assistant-ui-react repository as well as the assistant-ui-vue repository. Make a commit and push your changes after every single file edit. Use the assistant-ui-vue/.agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. 
``` ### Porting Browser-Use to TypeScript Since we were at a hackathon, we wanted to do something related to the sponsor tooling, so we decided to see if Ralph could port [Browser Use](https://github.com/browser-use/browser-use), a YC-backed web agent tool, from Python to TypeScript. We kicked off the loop with a simple prompt: ``` Your job is to port browser-use monorepo (Python) to better-use (Typescript) and maintain the repository. Make a commit and push your changes after every single file edit. Keep track of your current status in browser-use-ts/agent/TODO.md ``` After a few iterations of the loop, it seemed to be on track: ![First few commits](./assets/first-commits.png) ### What Happened We worked until after 2 AM, setting up a few VM instances (tmux sessions on GCP instances) to run the Claude Code loops, then headed home to get a few hours of sleep. We came back in the morning to an [almost fully functional port](https://github.com/repomirrorhq/better-use) of Browser Use to TypeScript. ![Better Use CLI](./assets/better-use.png) Here it is scraping the top 3 posts from Hacker News. [better-use.webm](https://github.com/user-attachments/assets/bdd15e9e-08e4-48a2-a6f9-05a550347c46) [View on YouTube](https://www.youtube.com/watch?v=fqp8EbYOPk8) Here's the Browser Use founder [@gregpr07](https://x.com/gregpr07), checking out the code. We think he liked it. ![Gregor and Simon smiling at laptop](./assets/gregor.png) ### We Did Some More Since we were spinning up a few loops anyways, we decided to port a few more software projects to see what came out. The Vercel AI SDK is in TypeScript... but what if you could use it in Python? Yeah... [it kind of worked](https://github.com/repomirrorhq/ai-sdk-python). ![AI SDK FastAPI Adapters](./assets/ai-sdk-fastapi.png) If you've ever struggled with some of the deeply-nested type constructors in the AI SDK, well, now you can struggle with them in Python too. 
We also tried a few specs-to-code loops - recreating [Convex](https://www.convex.dev) and [Dedalus](https://dedalus.dev) from their docs' llms-full.txt. Here's a first pass at [OpenDedalus](https://github.com/repomirrorhq/open-dedalus). ![Image of open-dedalus README](./assets/open-dedalus.png) ### What We Learned **Early Stopping** When starting the agents, we had a lot of questions. Will the agent write tests? Will it get stuck in an infinite loop and drift into random unrelated features? We were pleasantly surprised to find that the agent wrote tests, kept to its original instructions, never got stuck, kept scope under control and mostly declared the port 'done'. After finishing the port, most of the agents settled for writing extra tests or continuously updating agent/TODO.md to clarify how "done" they were. In one instance, the agent actually used `pkill` [to terminate itself](https://www.youtube.com/watch?v=UOLBTRazZpM) after realizing it was stuck in an infinite loop. ![Agent stopping its own process](./assets/pkill.png) **Overachieving** Another cool emergent behavior (as is common with LLMs) - After finishing the initial port, our AI SDK Python agent started adding extra features such as an integration for Flask and FastAPI (something that has no counterpart in the AI SDK JS version), as well as support for schema validators via Pydantic, Marshmallow, JSONSchema, etc. **Keep the Prompt Simple** Overall we found that less is more - a simple prompt is better than a complex one. You want to focus on the engine, not the scaffolding. Different members of our team kicking off different projects played around with instructions and ordering. You can view the actual prompts we used in the [prompts folder](./prompts/). At one point we tried “improving” the prompt with Claude’s help. It ballooned to 1,500 words. The agent immediately got slower and dumber. We went back to 103 words and it was back on track. 
**This is not perfect** For both [better-use](https://github.com/repomirrorhq/better-use) and [ai-sdk-python](https://github.com/repomirrorhq/ai-sdk-python), the headless agent didn't always deliver perfect working code. We ended up going in and updating the prompts incrementally or working with Claude Code interactively to get things from 90% to 100%. And as much as Claude may [claim that things are 100% perfectly implemented](https://github.com/repomirrorhq/better-use/blob/master/agent/TODO.md), there are a few browser-use demos from the Python project that don't work yet in TypeScript. ### Numbers We spent a little less than $800 on inference for the project. Overall the agents made ~1100 commits across all software projects. Each Sonnet agent costs about $10.50/hour to run overnight. ### What We Built Around It As we went about bootstrapping so many of these, we put together a simple tool to help set up a source/target repo pair for this sync work. (and yeah, [we also built that with Ralph](https://github.com/repomirrorhq/repomirror/blob/main/prompt.md)) ``` npx repomirror init \ --source-dir ./browser-use \ --target-dir ./browser-use-zig \ --instructions "convert browser use to Zig" ``` Instructions can be anything like "convert from React to Vue" or "change from gRPC to REST using OpenAPI spec codegen". It's not perfectly architected, and it's a little hacky. But it was enough to hack things together, and it's designed similar to shadcn's "open-box" approach where it generates scripts/prompts that you are welcome to modify after the `init` phase. 
After the init phase, you'll have: ``` .repomirror/ - prompt.md - sync.sh - ralph.sh ``` When you've checked out the prompt and you're ready to test it, you can run `npx repomirror sync` to do a single iteration of the loop, and you can run `npx repomirror sync-forever` to kick off the Ralph infinite loop: [repomirror.webm](https://github.com/user-attachments/assets/7616825a-064d-4a5b-b1bc-08fc5f816172) [View on YouTube](https://www.youtube.com/watch?v=_GxemIzk2lo) If you wanna play with some of the other repos, they're listed on the [README](https://github.com/repomirrorhq/repomirror?tab=readme-ov-file#projects). [better-use](https://github.com/repomirrorhq/better-use) is now on npm: ``` npx better-use run ``` ai-sdk-python still has [one or two issues](https://github.com/repomirrorhq/ai-sdk-python/blob/master/agent/FIX_PLAN.md) that we're working on before it makes it to PyPI. ### Closing Thoughts As you might imagine, our thoughts are all a little chaotic and conflicting, so rather than a cohesive conclusion, we'll just leave with a few of our team's personal reflections on the last ~29 hours: > I'm a little bit feeling the AGI and it's mostly exciting but also terrifying. > The minimalist in me is happy to have hard proof that we are probably overcomplicating things. > Clear to me that we're at the very very beginning of the exponential takeoff curve. Thanks to the whole team [@yonom](https://x.com/simonfarshid) and [@AVGVSTVS96](https://x.com/AVGVSTVS96) from [assistant-ui](https://github.com/assistant-ui), [@dexhorthy](https://x.com/dexhorthy) from [HumanLayer](https://humanlayer.dev), [@Lantos1618](https://x.com/Lantos1618) from [github.gg](https://github.gg), and to [Geoff](https://x.com/GeoffreyHuntley) for the inspiration. 
and yeah, we forgot to get a team photo Screenshot 2025-08-25 at 9 03 34 AM ================================================ FILE: repomirror.yaml ================================================ sourceRepo: ./ targetRepo: /tmp/test-target2 transformationInstructions: transform typescript to python ================================================ FILE: specs/devtooling.md ================================================ developer workflow: publish to npm package repomirror unit tests in github actions: just recent node, no need for matrix testing ================================================ FILE: specs/github_actions.md ================================================ ## PR Sync ``` npx repomirror setup-github-pr-sync ``` ``` I'll help you set up a github actions workflow that will run the sync-one command on every pr merge Target repo, e.g. repomirrorhq/repomirror: Times to loop (advanced, recommend 3): [3] ``` Flags `--target-repo` and `--times-to-loop` creates .github/workflows/repomirror.yml target-repo and times-to-loop are persisted to repomirror.yaml similar to other flags and loaded as defaults when running the `setup-github-pr-sync` command. if already present, prompts "want to overwrite?" - exits if no. flag --overwrite to force overwrite. setup-github-pr-sync will create a github actions workflow that will run the sync-one command on every pr merge It will prompt the user for followup steps: - push to github - add secrets for ANTHROPIC_API_KEY and GITHUB_TOKEN, where GITHUB_TOKEN has read/push access to the target repo the workflow will always have a workflow_dispatch trigger, and an optional push trigger. the workflow will install repomirror and run the sync-one command in a loop N times ### dispatch sync ``` npx repomirror dispatch-sync ``` will check to ensure the workflow exists and is present in the current repo. 
will dispatch a workflow_dispatch event to the repomirror.yml workflow using the `gh` cli ``` this will prompt the user, describing what's gonna happen and get confirmation. a `-y` `--yes` flag will skip the confirmation prompt. A `--quiet` `-q` flag will suppress output. Quiet cannot be used without --yes, but --yes can be used without --quiet. ================================================ FILE: specs/repomirror.md ================================================ run repomirror init one line use prompt that asks ``` I'll help you maintain a transformed copy of this repo: Source Repo you want to tckransform: [..] # note 0 Where do you want to transform code to: [..] # note 1 What changes do you want to make: [e.g. "translate this python repo to typescript"] ``` note 0 - the default source repo is the current directory `./` note 1 - the default transform directory is `../REPONAME-transformed` all prompts should be loadable from a repomirror.yaml file if present, or settable with a command line flag. `repomirror help` or `--help` should explain how this works and the available options. - all prompts/cli flags are stashed to a repomirror.yaml during setup, and defaults are populated from the yaml file if present (instead of core defaults) preflight checks - ensure the target directory exists - ensure the target directory is initialized as a git repo - ensure the target directory has at least one upstream - ensure the user has a configured claude code profile (e.g. `claude -p "say hi" and ensure the output contains "hi" or "Hi") The preflight checks should print output about what they are doing and what they are checking. 
### Background: Claude sdk: ``` import { query } from "@anthropic-ai/claude-code"; // IMPORTANT: Handle all message types to avoid hanging // The loop will wait forever if you don't handle errors or check for completion for await (const message of query({ prompt: "...PROMPT...", })) { if (message.type === "result") { if (message.is_error) { // Handle error case - MUST break or throw to avoid hanging throw new Error(message.result || "Claude SDK error"); } console.log(message.result); break; // Exit loop after getting result } // Consider adding timeout or other message type handlers } ``` ### step 1 use the claude sdk to read files and generate a prompt **CRITICAL IMPLEMENTATION NOTE**: The Claude SDK async iterator can hang indefinitely if not properly handled. Ensure: 1. Always break the loop after receiving a valid result 2. Handle error cases explicitly (check `is_error` flag) 3. Consider implementing a timeout mechanism 4. Store the result and break immediately - don't continue iterating where PROMPT conveys: ``` your task is to generate an optimized prompt for repo transformation. The prompt should match the format of the examples below. Your job is to port [SOURCE PATH] monorepo (for react) to [TARGET PATH] (for vue) and maintain the repository. You have access to the current [SOURCE PATH] repository as well as the [TARGET PATH] repository. Make a commit and push your changes after every single file edit. Use the [TARGET_PATH]/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. Your job is to port browser-use monorepo (Python) to browser-use-ts (Typescript) and maintain the repository. 
You have access to the current [SOURCE PATH] repository as well as the target [TARGET_PATH] repository. Make a commit and push your changes after every single file edit. Use the [TARGET PATH]/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. The users instructions for transformation are: Your Job: When you are ready, respond with EXACTLY the prompt matching the example, tailored for following the users' instructions and nothing else. You should follow the format EXACTLY, filling in information based on what you learn from a CURSORY exploration of the source repo (this directory). Ensure you ONLY use the read tools (Read, Search, Grep, LS, Glob, etc) to explore the repo. You only need enough sense to build a good prompt, so dont use subagents. ``` ### step 2 run claude code with the SDK using the prompt you generated with templating. 
As you are building you may need to test the phrasing to get claude to output ### step 3 add the following files to the source repo in .repomirror/ - .repomirror/prompt.md # contents from the prompt - .repomirror/sync.sh - .repomirror/ralph.sh - .repomirror/.gitignore .repomirror/.gitignore has the exact below contents: ``` claude_output.jsonl ``` sync.sh has the exact below contents: ``` cat .repomirror/prompt.md | \ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir PATH_TO_TARGET_REPO | \ tee -a .repomirror/claude_output.jsonl | \ npx repomirror visualize --debug; ``` ralph.sh has the exact below contents: ``` while :; do ./sync.sh echo -e "===SLEEP===\n===SLEEP===\n"; echo 'looping'; sleep 10; done ``` visualize command is a cli command that uses the exact same logic in the hack/visualize.ts file. The shell scripts are included in the npm dist/ bundle and baked into the package so they can be copied out of the package root by `npx repomirror init` cli command. **NOTE** - the above commands are sketches, you may find you need to adjust them to fit together well or to improve usability or reduce error-proneness. ### step 4 Output instructions to the user about next steps to run the commands ``` run `npx repomirror sync` - this will run the sync.sh script once run `npx repomirror sync-forever` - this will run the ralph.sh script, working forever to implement all the changes. The following files were created and safe to commit. Edit prompt.md as you see fit, but you probably dont want to run these files directly - .repomirror/prompt.md # prompt - .repomirror/sync.sh - .repomirror/ralph.sh - .repomirror/.gitignore ``` ### INIT CLI NOTES - if .repomirror already exists, prompt the user if they want to overwrite the existing .repomirror/ directory. Flag to `--overwrite` to force overwrite. 
================================================ FILE: specs_deprecated_ignore/github_actions.md ================================================ ## PR Sync ``` npx repomirror setup-github-pr-sync ``` ``` I'll help you set up a github actions workflow that will run the sync-one command on every pr merge Target repo, e.g. repomirrorhq/repomirror: Times to loop (advanced, recommend 3): [3] ``` Flags `--target-repo` and `--times-to-loop` creates .github/workflows/repomirror.yml target-repo and times-to-loop are persisted to repomirror.yaml similar to other flags and loaded as defaults when running the `setup-github-pr-sync` command. if already present, prompts "want to overwrite?" - exits if no. flag --overwrite to force overwrite. setup-github-pr-sync will create a github actions workflow that will run the sync-one command on every pr merge It will prompt the user for followup steps: - push to github - add secrets for ANTHROPIC_API_KEY and GITHUB_TOKEN, where GITHUB_TOKEN has read/push access to the target repo the workflow will always have a workflow_dispatch trigger, and an optional push trigger. the workflow will install repomirror and run the sync-one command in a loop N times ### dispatch sync ``` npx repomirror dispatch-sync ``` will check to ensure the workflow exists and is present in the current repo. will dispatch a workflow_dispatch event to the repomirror.yml workflow using the `gh` cli ``` this will prompt the user, describing what's gonna happen and get confirmation. a `-y` `--yes` flag will skip the confirmation prompt. A `--quiet` `-q` flag will suppress output. Quiet cannot be used without --yes, but --yes can be used without --quiet. 
================================================ FILE: specs_deprecated_ignore/remote_sync.md ================================================ x ================================================ FILE: specs_deprecated_ignore/sync_check ================================================ sync-check serves as a terminator for the sync-one loop. for example, ``` while ! sync-check TARGET; do sync-one TARGET done; ``` sync-check will check if the target repo is up to date with the source repo. it uses a similar prompt to sync-one, but with the final prompt to ================================================ FILE: src/cli.ts ================================================ #!/usr/bin/env node import { Command } from "commander"; import { init } from "./commands/init"; import { syncOne } from "./commands/sync-one"; import { sync } from "./commands/sync"; import { syncForever } from "./commands/sync-forever"; import { visualize } from "./commands/visualize"; import { remote } from "./commands/remote"; import { push } from "./commands/push"; import { pull } from "./commands/pull"; import { githubActions } from "./commands/github-actions"; import { setupGithubPrSync } from "./commands/setup-github-pr-sync"; import { dispatchSync } from "./commands/dispatch-sync"; const program = new Command(); program .name("repomirror") .description("Sync and transform repositories using AI agents") .version("0.1.0") .addHelpText( "after", ` Configuration: repomirror uses a repomirror.yaml file to store configuration. On first run, settings are saved to this file. On subsequent runs, the file is used for defaults. Command-line flags override both yaml defaults and interactive prompts. 
Examples: $ npx repomirror init Interactive mode with prompts $ npx repomirror init --source ./ --target ../myrepo-ts --instructions "convert to typescript" Skip prompts and use provided values $ npx repomirror help Show this help message`, ); program .command("init") .description("Initialize repomirror in current directory") .option("-s, --source ", "Source repository path") .option("-t, --target ", "Target repository path") .option("-i, --instructions ", "Transformation instructions") .action((options) => { init({ sourceRepo: options.source, targetRepo: options.target, transformationInstructions: options.instructions, }); }); program .command("sync") .description("Run one sync iteration") .option("--auto-push", "Automatically push to all remotes after successful sync") .action((options) => sync({ autoPush: options.autoPush })); program .command("sync-one") .description("Run one sync iteration (alias for sync)") .option("--auto-push", "Automatically push to all remotes after successful sync") .action((options) => syncOne({ autoPush: options.autoPush })); program .command("sync-forever") .description("Run sync continuously") .option("--auto-push", "Automatically push to all remotes after each sync iteration") .action((options) => syncForever({ autoPush: options.autoPush })); program .command("visualize") .description("Visualize Claude output stream") .option("--debug", "Show debug timestamps") .action((options) => visualize(options)); program .command("remote [args...]") .description("Manage remote repositories") .addHelpText( "after", ` Actions: add [branch] Add a remote repository (default branch: main) list List configured remotes remove Remove a remote repository Examples: $ npx repomirror remote add origin https://github.com/user/repo.git $ npx repomirror remote add staging https://github.com/user/staging.git develop $ npx repomirror remote list $ npx repomirror remote remove origin`, ) .action((action, args) => remote(action, ...args)); program 
.command("push") .description("Push transformed changes to remote repositories") .option("-r, --remote ", "Remote repository name") .option("-b, --branch ", "Branch name to push to") .option("--all", "Push to all configured remotes") .option("--dry-run", "Show what would be pushed without actually pushing") .addHelpText( "after", ` Examples: $ npx repomirror push Push to default remote (origin/main) $ npx repomirror push --remote staging Push to specific remote using its configured branch $ npx repomirror push --remote origin --branch feature-branch Push to specific remote and branch $ npx repomirror push --all Push to all configured remotes $ npx repomirror push --dry-run Show what would be pushed without pushing`, ) .action((options) => push(options)); program .command("pull") .description("Pull source changes and trigger re-sync") .option("--source-only", "Pull source without re-sync") .option("--sync-after", "Pull and run continuous sync after") .option("--check", "Check for source changes without pulling") .addHelpText( "after", ` Examples: $ npx repomirror pull Pull source changes and re-sync (if auto_sync is enabled) $ npx repomirror pull --source-only Pull source changes without triggering sync $ npx repomirror pull --sync-after Pull source changes and run continuous sync $ npx repomirror pull --check Check for available changes without pulling`, ) .action((options) => pull(options)); program .command("github-actions") .description("Generate GitHub Actions workflow for automated syncing") .option("-n, --name ", "Workflow file name (default: repomirror-sync.yml)") .option("-s, --schedule ", "Cron schedule for automatic runs") .option("--no-auto-push", "Disable automatic pushing to target repo") .addHelpText( "after", ` Generates a GitHub Actions workflow file for automated repository syncing. 
Examples: $ npx repomirror github-actions Interactive setup with prompts $ npx repomirror github-actions --schedule "0 */12 * * *" Run every 12 hours $ npx repomirror github-actions --no-auto-push Create workflow without automatic push to target Notes: - Requires repomirror.yaml to be present - Creates workflow in .github/workflows/ - You'll need to set up CLAUDE_API_KEY secret in GitHub`, ) .action((options) => githubActions({ workflowName: options.name, schedule: options.schedule, autoPush: options.autoPush, })); program .command("setup-github-pr-sync") .description("Setup GitHub Actions workflow for PR-triggered sync") .option("-t, --target-repo ", "Target repository (owner/repo format)") .option("-l, --times-to-loop ", "Number of times to loop sync-one command", "3") .option("--overwrite", "Force overwrite existing workflow file") .addHelpText( "after", ` Sets up a GitHub Actions workflow that runs sync-one command on PR merges. Examples: $ npx repomirror setup-github-pr-sync Interactive setup with prompts $ npx repomirror setup-github-pr-sync --target-repo myorg/myrepo Specify target repository directly $ npx repomirror setup-github-pr-sync --times-to-loop 5 Set number of sync iterations $ npx repomirror setup-github-pr-sync --overwrite Force overwrite existing workflow file Notes: - Creates .github/workflows/repomirror.yml - Settings are persisted to repomirror.yaml - Workflow has workflow_dispatch trigger for manual runs - Requires ANTHROPIC_API_KEY and GITHUB_TOKEN secrets`, ) .action((options) => setupGithubPrSync({ targetRepo: options.targetRepo, timesToLoop: options.timesToLoop ? parseInt(options.timesToLoop) : undefined, overwrite: options.overwrite, })); program .command("dispatch-sync") .description("Dispatch GitHub Actions workflow for manual sync") .option("-y, --yes", "Skip confirmation prompt") .option("-q, --quiet", "Suppress output (requires --yes)") .addHelpText( "after", ` Dispatches a workflow_dispatch event to the repomirror.yml workflow. 
Examples: $ npx repomirror dispatch-sync Interactive mode with confirmation prompt $ npx repomirror dispatch-sync --yes Skip confirmation and dispatch immediately $ npx repomirror dispatch-sync --yes --quiet Silent dispatch without output Notes: - Requires .github/workflows/repomirror.yml to exist - Requires GitHub CLI (gh) to be installed and authenticated - Workflow must have workflow_dispatch trigger enabled - --quiet flag can only be used with --yes flag`, ) .action((options) => dispatchSync({ yes: options.yes, quiet: options.quiet, })); program.parse(); ================================================ FILE: src/commands/dispatch-sync.ts ================================================ import { promises as fs } from "fs"; import { join } from "path"; import chalk from "chalk"; import ora from "ora"; import { execa } from "execa"; import inquirer from "inquirer"; interface DispatchSyncOptions { yes?: boolean; quiet?: boolean; } async function workflowExists(): Promise { try { const workflowPath = join(process.cwd(), ".github", "workflows", "repomirror.yml"); await fs.access(workflowPath); return true; } catch { return false; } } async function getRepoInfo(): Promise<{ owner: string; repo: string } | null> { try { const { stdout } = await execa("git", ["config", "--get", "remote.origin.url"]); const url = stdout.trim(); // Parse GitHub URL to extract owner/repo let match = url.match(/github\.com[:/]([^/]+)\/([^/.]+)/); if (!match) { return null; } return { owner: match[1], repo: match[2], }; } catch { return null; } } async function checkGhCliInstalled(): Promise { try { await execa("gh", ["--version"]); return true; } catch { return false; } } async function dispatchWorkflow(owner: string, repo: string, quiet: boolean): Promise { const spinner = quiet ? 
null : ora("Dispatching workflow...").start(); try { const { stdout } = await execa("gh", [ "workflow", "run", "repomirror.yml", "--repo", `${owner}/${repo}`, ]); if (spinner) { spinner.succeed("Workflow dispatched successfully"); } else if (!quiet) { console.log(chalk.green("✅ Workflow dispatched successfully")); } // Show workflow run URL if available if (!quiet && stdout) { console.log(chalk.gray(stdout)); } } catch (error) { if (spinner) { spinner.fail("Failed to dispatch workflow"); } if (error instanceof Error) { const errorMessage = error.message.toLowerCase(); if (errorMessage.includes("not found") || errorMessage.includes("404")) { console.error(chalk.red("Error: Workflow 'repomirror.yml' not found in the repository")); console.log(chalk.gray("Make sure the workflow file exists and you have access to the repository")); } else if (errorMessage.includes("authentication") || errorMessage.includes("permission")) { console.error(chalk.red("Error: Authentication failed")); console.log(chalk.gray("Make sure you're authenticated with GitHub CLI:")); console.log(chalk.gray(" gh auth login")); } else if (errorMessage.includes("workflow_dispatch")) { console.error(chalk.red("Error: Workflow does not support manual dispatch")); console.log(chalk.gray("Make sure the workflow has 'workflow_dispatch:' trigger")); } else { console.error(chalk.red(`Error dispatching workflow: ${error.message}`)); } } else { console.error(chalk.red(`Error dispatching workflow: ${String(error)}`)); } throw error; } } export async function dispatchSync(options: DispatchSyncOptions = {}): Promise { // Validate flag combination if (options.quiet && !options.yes) { console.error(chalk.red("Error: --quiet cannot be used without --yes")); console.log(chalk.gray("Use --quiet and --yes together, or use --yes alone")); process.exit(1); } // Check if repomirror.yml workflow exists const exists = await workflowExists(); if (!exists) { console.error(chalk.red("Error: .github/workflows/repomirror.yml not 
found")); console.log(chalk.gray("Run 'npx repomirror setup-github-pr-sync' to create the workflow first")); process.exit(1); } // Check if gh CLI is installed const ghInstalled = await checkGhCliInstalled(); if (!ghInstalled) { console.error(chalk.red("Error: GitHub CLI (gh) is not installed")); console.log(chalk.gray("Install it from: https://cli.github.com/")); process.exit(1); } // Get repository information const repoInfo = await getRepoInfo(); if (!repoInfo) { console.error(chalk.red("Error: Could not determine GitHub repository")); console.log(chalk.gray("Make sure you're in a git repository with a GitHub origin remote")); process.exit(1); } const { owner, repo } = repoInfo; // Show what's going to happen (unless quiet) if (!options.quiet) { console.log(chalk.cyan("This will dispatch the repomirror.yml workflow to run sync-one command")); console.log(chalk.gray(`Repository: ${owner}/${repo}`)); console.log(chalk.gray("Workflow: .github/workflows/repomirror.yml")); console.log(); } // Get confirmation (unless --yes flag is used) if (!options.yes) { const { shouldProceed } = await inquirer.prompt([ { type: "confirm", name: "shouldProceed", message: "Do you want to dispatch the workflow?", default: true, }, ]); if (!shouldProceed) { console.log(chalk.yellow("Operation cancelled")); process.exit(0); } } try { await dispatchWorkflow(owner, repo, options.quiet || false); if (!options.quiet) { console.log(chalk.green("\n✅ Workflow dispatch completed")); console.log(chalk.gray("You can monitor the workflow run at:")); console.log(chalk.gray(`https://github.com/${owner}/${repo}/actions`)); } } catch (error) { console.error( chalk.red( `Dispatch failed: ${error instanceof Error ? 
error.message : String(error)}` ) ); process.exit(1); } } ================================================ FILE: src/commands/github-actions.ts ================================================ import { promises as fs } from "fs"; import { join } from "path"; import chalk from "chalk"; import ora from "ora"; import inquirer from "inquirer"; interface GitHubActionsOptions { workflowName?: string; schedule?: string; autoPush?: boolean; } const DEFAULT_WORKFLOW = `name: RepoMirror Sync on: schedule: # Run every 6 hours - cron: '{SCHEDULE}' workflow_dispatch: # Allow manual trigger push: branches: [ main ] paths: - '.repomirror/**' - 'repomirror.yaml' jobs: sync: runs-on: ubuntu-latest steps: - name: Checkout source repository uses: actions/checkout@v3 with: path: source - name: Checkout target repository uses: actions/checkout@v3 with: repository: {TARGET_REPO} token: \${{ secrets.GITHUB_TOKEN }} path: target - name: Setup Node.js uses: actions/setup-node@v3 with: node-version: '20' - name: Install repomirror run: npm install -g repomirror - name: Setup Claude Code env: CLAUDE_API_KEY: \${{ secrets.CLAUDE_API_KEY }} run: | # Setup Claude Code with API key echo "Setting up Claude Code..." # Note: You'll need to configure Claude Code authentication # according to your setup. This might involve setting up # a service account or using API keys. 
- name: Run RepoMirror sync working-directory: source env: SKIP_CLAUDE_TEST: true # Skip interactive Claude test in CI run: | # Run the sync once npx repomirror sync - name: Push changes to target if: {AUTO_PUSH} working-directory: target run: | git config user.name "GitHub Actions" git config user.email "actions@github.com" if [ -n "$(git status --porcelain)" ]; then git add -A git commit -m "Automated sync from source repository" git push else echo "No changes to push" fi `; export async function githubActions(options?: GitHubActionsOptions): Promise { console.log(chalk.cyan("Setting up GitHub Actions workflow for RepoMirror\n")); // Check if repomirror.yaml exists const configPath = join(process.cwd(), "repomirror.yaml"); try { await fs.access(configPath); } catch { console.error(chalk.red("Error: repomirror.yaml not found")); console.log(chalk.yellow("Please run 'npx repomirror init' first")); process.exit(1); } // Load config to get target repo const yaml = await import("yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const config = yaml.parse(configContent); // Prompt for workflow configuration const answers = await inquirer.prompt([ { type: "input", name: "workflowName", message: "Workflow file name:", default: options?.workflowName || "repomirror-sync.yml", validate: (input) => { if (!input.endsWith(".yml") && !input.endsWith(".yaml")) { return "Workflow file must end with .yml or .yaml"; } return true; }, }, { type: "input", name: "schedule", message: "Cron schedule (or press enter for every 6 hours):", default: options?.schedule || "0 */6 * * *", when: !options?.schedule, }, { type: "confirm", name: "autoPush", message: "Automatically push changes to target repository?", default: options?.autoPush !== undefined ? 
options.autoPush : true, when: options?.autoPush === undefined, }, { type: "input", name: "targetRepo", message: "Target repository (owner/repo format for GitHub):", default: config.targetRepo?.replace(/^\.\.\//, "").replace(/-transformed$/, ""), validate: (input) => { if (!input || input === config.targetRepo) { return "Please provide the GitHub repository in owner/repo format"; } return true; }, }, ]); const finalOptions = { workflowName: options?.workflowName || answers.workflowName, schedule: options?.schedule || answers.schedule, autoPush: options?.autoPush !== undefined ? options.autoPush : answers.autoPush, targetRepo: answers.targetRepo, }; // Create .github/workflows directory if it doesn't exist const workflowDir = join(process.cwd(), ".github", "workflows"); await fs.mkdir(workflowDir, { recursive: true }); // Generate workflow content const workflowContent = DEFAULT_WORKFLOW .replace("{SCHEDULE}", finalOptions.schedule) .replace("{TARGET_REPO}", finalOptions.targetRepo) .replace("{AUTO_PUSH}", finalOptions.autoPush ? "true" : "false"); // Write workflow file const workflowPath = join(workflowDir, finalOptions.workflowName); const spinner = ora("Creating GitHub Actions workflow...").start(); try { await fs.writeFile(workflowPath, workflowContent); spinner.succeed("GitHub Actions workflow created"); console.log(chalk.green(`\n✅ Workflow created at ${workflowPath}`)); console.log(chalk.cyan("\nNext steps:")); console.log(chalk.white("1. Review and customize the workflow file as needed")); console.log(chalk.white("2. Set up the following GitHub secrets:")); console.log(chalk.gray(" - CLAUDE_API_KEY: Your Claude API key")); console.log(chalk.gray(" - GITHUB_TOKEN: Already provided by GitHub Actions")); console.log(chalk.white("3. Commit and push the workflow file to your repository")); console.log(chalk.white("4. 
The workflow will run on the schedule you specified")); console.log(chalk.yellow("\n⚠️ Important:")); console.log(chalk.yellow("Make sure to configure Claude Code authentication in the workflow")); console.log(chalk.yellow("This typically requires setting up API keys or service accounts")); } catch (error) { spinner.fail("Failed to create workflow"); console.error(chalk.red(`Error: ${error instanceof Error ? error.message : String(error)}`)); process.exit(1); } } ================================================ FILE: src/commands/init.ts ================================================ import { promises as fs } from "fs"; import path, { join, basename, resolve } from "path"; import inquirer from "inquirer"; import chalk from "chalk"; import ora from "ora"; import { query } from "@anthropic-ai/claude-code"; import { execa } from "execa"; import yaml from "yaml"; interface InitOptions { sourceRepo: string; targetRepo: string; transformationInstructions: string; } interface RemoteConfig { url: string; branch: string; auto_push?: boolean; } interface RepoMirrorConfig { sourceRepo: string; targetRepo: string; transformationInstructions: string; remotes?: { [remoteName: string]: RemoteConfig; }; push?: { default_remote?: string; default_branch?: string; commit_prefix?: string; }; pull?: { auto_sync?: boolean; source_remote?: string; source_branch?: string; }; } async function loadExistingConfig(sourceRepo?: string): Promise | null> { try { // Use process.cwd() as base for relative paths to ensure proper mocking in tests const baseDir = sourceRepo && sourceRepo !== "./" ? 
resolve(process.cwd(), sourceRepo) : process.cwd(); const configPath = join(baseDir, "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); return yaml.parse(configContent) as RepoMirrorConfig; } catch { return null; } } async function saveConfig(config: RepoMirrorConfig, sourceRepo?: string): Promise { // Use process.cwd() as base for relative paths to ensure proper mocking in tests const baseDir = sourceRepo && sourceRepo !== "./" ? resolve(process.cwd(), sourceRepo) : process.cwd(); // Ensure the directory exists before writing the config await fs.mkdir(baseDir, { recursive: true }); const configPath = join(baseDir, "repomirror.yaml"); const configContent = yaml.stringify(config); await fs.writeFile(configPath, configContent, "utf-8"); } export async function init(cliOptions?: Partial): Promise { console.log( chalk.cyan("I'll help you maintain a transformed copy of this repo:\n"), ); // Load existing config if present from source repo const sourceRepoPath = cliOptions?.sourceRepo || "./"; const existingConfig = await loadExistingConfig(sourceRepoPath); if (existingConfig) { console.log( chalk.yellow("Found existing repomirror.yaml, using as defaults\n"), ); } // Get current directory name for default target const currentDir = process.cwd(); const repoName = basename(currentDir); const defaultTarget = existingConfig?.targetRepo || `../${repoName}-transformed`; // Merge CLI options, existing config, and defaults const defaults = { sourceRepo: cliOptions?.sourceRepo || existingConfig?.sourceRepo || "./", targetRepo: cliOptions?.targetRepo || existingConfig?.targetRepo || defaultTarget, transformationInstructions: cliOptions?.transformationInstructions || existingConfig?.transformationInstructions || "translate this python repo to typescript", }; const answers = await inquirer.prompt([ { type: "input", name: "sourceRepo", message: "Source Repo you want to transform:", default: defaults.sourceRepo, when: !cliOptions?.sourceRepo, }, { type: 
"input", name: "targetRepo", message: "Where do you want to transform code to:", default: defaults.targetRepo, when: !cliOptions?.targetRepo, }, { type: "input", name: "transformationInstructions", message: "What changes do you want to make:", default: defaults.transformationInstructions, when: !cliOptions?.transformationInstructions, }, ]); // Merge CLI options with answers const finalConfig: InitOptions = { sourceRepo: cliOptions?.sourceRepo || answers.sourceRepo, targetRepo: cliOptions?.targetRepo || answers.targetRepo, transformationInstructions: cliOptions?.transformationInstructions || answers.transformationInstructions, }; // Save configuration to repomirror.yaml in source directory await saveConfig(finalConfig, finalConfig.sourceRepo); console.log(chalk.green("\n✅ Saved configuration to repomirror.yaml")); // Perform preflight checks await performPreflightChecks(finalConfig.targetRepo); // Generate transformation prompt using Claude SDK console.log(chalk.cyan("\nGenerating transformation prompt...")); try { const optimizedPrompt = await generateTransformationPrompt( finalConfig.sourceRepo, finalConfig.targetRepo, finalConfig.transformationInstructions, ); console.log(chalk.green("✔ Generated transformation prompt")); // Create .repomirror directory and files await createRepoMirrorFiles( finalConfig.sourceRepo, finalConfig.targetRepo, optimizedPrompt, ); console.log(chalk.green("\n✅ repomirror initialized successfully!")); console.log(chalk.cyan("\nNext steps:")); console.log( chalk.white( "run `npx repomirror sync` - this will run the sync.sh script once", ), ); console.log(""); console.log( chalk.white( "run `npx repomirror sync-forever` - this will run the ralph.sh script, working forever to implement all the changes", ), ); console.log(""); console.log( chalk.white( "The following files were created and safe to commit. 
Edit prompt.md as you see fit, but you probably dont want to run these files directly", ), ); console.log(""); console.log(chalk.white("- .repomirror/prompt.md # prompt")); console.log(chalk.white("- .repomirror/sync.sh")); console.log(chalk.white("- .repomirror/ralph.sh")); console.log(chalk.white("- .repomirror/.gitignore")); } catch (error) { console.log(chalk.red("✖ Failed to generate transformation prompt")); console.error( chalk.red( `Error: ${error instanceof Error ? error.message : String(error)}`, ), ); process.exit(1); } } async function performPreflightChecks(targetRepo: string): Promise { console.log(chalk.cyan("\n🔍 Performing preflight checks...\n")); // Check if target directory exists console.log(chalk.white("1. Checking if target directory exists...")); const dirSpinner = ora(` Accessing ${targetRepo}`).start(); try { await fs.access(targetRepo); dirSpinner.succeed(` Target directory ${chalk.green(targetRepo)} exists`); } catch { dirSpinner.fail( ` Target directory ${chalk.red(targetRepo)} does not exist`, ); process.exit(1); } // Check if target directory is a git repo console.log( chalk.white("2. Checking if target directory is a git repository..."), ); const gitSpinner = ora( ` Verifying git repository in ${targetRepo}`, ).start(); try { const { stdout } = await execa("git", ["rev-parse", "--git-dir"], { cwd: targetRepo, }); const gitDir = stdout.trim(); gitSpinner.succeed( ` Git repository found (git dir: ${chalk.green(gitDir)})`, ); } catch { gitSpinner.fail( ` Target directory ${chalk.red(targetRepo)} is not a git repository`, ); process.exit(1); } // Check if target directory has at least one upstream console.log(chalk.white("3. 
Checking git remotes configuration...")); const remoteSpinner = ora(` Listing git remotes in ${targetRepo}`).start(); try { const { stdout } = await execa("git", ["remote", "-v"], { cwd: targetRepo, }); if (!stdout.trim()) { remoteSpinner.fail( ` Target directory ${chalk.red(targetRepo)} has no git remotes configured`, ); process.exit(1); } const remotes = stdout.trim().split("\n"); const remoteNames = [ ...new Set(remotes.map((line) => line.split("\t")[0])), ]; remoteSpinner.succeed( ` Found ${chalk.green(remoteNames.length)} git remote(s): ${chalk.green(remoteNames.join(", "))}`, ); // Show the actual remotes for user reference console.log(chalk.gray(" Remotes:")); remotes.forEach((remote) => { console.log(chalk.gray(` ${remote}`)); }); } catch { remoteSpinner.fail( ` Failed to check git remotes in ${chalk.red(targetRepo)}`, ); process.exit(1); } // Check if Claude Code is configured (skip in test mode) if (process.env.SKIP_CLAUDE_TEST === "true") { console.log(chalk.yellow("4. Skipping Claude Code test (test mode)")); } else { console.log(chalk.white("4. Testing Claude Code configuration...")); const claudeSpinner = ora(" Running Claude Code test command").start(); try { const { stdout } = await execa("claude", ["-p", "say hi"], { timeout: 30000, // 30 second timeout input: "", // Provide empty stdin to prevent claude from waiting }); // Check if Claude responded with something reasonable (not empty and more than 10 chars) if (!stdout || stdout.trim().length < 10) { claudeSpinner.fail( " Claude Code test failed - response was empty or too short", ); console.log( chalk.gray( ` Actual response: ${stdout.slice(0, 100)}${stdout.length > 100 ? "..." : ""}`, ), ); process.exit(1); } claudeSpinner.succeed(" Claude Code is working correctly"); console.log( chalk.gray( ` Claude response: ${stdout.slice(0, 100)}${stdout.length > 100 ? "..." 
: ""}`, ), ); } catch (error) { if (error instanceof Error && error.message.includes("timed out")) { claudeSpinner.fail(" Claude Code test timed out after 30 seconds"); console.log(chalk.red(" The 'claude -p \"say hi\"' command is not responding")); console.log(chalk.yellow(" This might indicate an issue with your Claude Code setup")); } else { claudeSpinner.fail(" Claude Code is not properly configured"); console.log(chalk.red(" Please run `claude` to set up your profile")); } if (error instanceof Error) { console.log(chalk.gray(` Error: ${error.message}`)); } process.exit(1); } } console.log(chalk.green("\n✅ All preflight checks passed!\n")); } async function generateTransformationPrompt( sourceRepo: string, targetRepo: string, transformationInstructions: string, ): Promise { // In test mode, return a simple template without calling Claude if (process.env.SKIP_CLAUDE_TEST === "true") { const testPrompt = `Your job is to port ${sourceRepo} to ${targetRepo} and maintain the repository. You have access to the current ${sourceRepo} repository as well as the ${targetRepo} repository. Make a commit and push your changes after every single file edit. Use the ${targetRepo}/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. ${transformationInstructions}`; return testPrompt; } const metaPrompt = `your task is to generate an optimized prompt for repo transformation. The prompt should match the format of the examples below. Your job is to port [SOURCE PATH] monorepo (for react) to [TARGET PATH] (for vue) and maintain the repository. You have access to the current [SOURCE PATH] repository as well as the [TARGET PATH] repository. Make a commit and push your changes after every single file edit. Use the [TARGET_PATH]/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. 
When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. Your job is to port browser-use monorepo (Python) to browser-use-ts (Typescript) and maintain the repository. You have access to the current [SOURCE PATH] repository as well as the target [TARGET_PATH] repository. Make a commit and push your changes after every single file edit. Use the [TARGET PATH]/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing. The users instructions for transformation are: ${transformationInstructions} Your Job: When you are ready, respond with EXACTLY the prompt matching the example, tailored for following the users' instructions and nothing else. You should follow the format EXACTLY, filling in information based on what you learn from a CURSORY exploration of the source repo (this directory). Ensure you ONLY use the read tools (Read, Search, Grep, LS, Glob, etc) to explore the repo. 
You only need enough sense to build a good prompt, so dont use subagents.`; let result = ""; let toolCallCount = 0; let queryAborted = false; // Handle graceful shutdown during Claude SDK query const signalHandler = () => { console.log(chalk.yellow("\n\nStopping prompt generation...")); queryAborted = true; process.exit(0); }; process.on('SIGINT', signalHandler); process.on('SIGTERM', signalHandler); try { for await (const message of query({ prompt: metaPrompt, })) { if (queryAborted) break; // Stream tool calls to user in a compact format if (message.type === "assistant" && (message as any).message?.content?.[0]?.name) { const toolName = (message as any).message.content[0].name; const toolInput = (message as any).message.content[0].input; toolCallCount++; // Build compact tool display let toolDisplay = ` ${chalk.cyan(toolName)}`; // Add key argument for the tool if (toolInput) { if (toolInput.file_path) { toolDisplay += `(${chalk.green(toolInput.file_path)})`; } else if (toolInput.path) { toolDisplay += `(${chalk.green(toolInput.path)})`; } else if (toolInput.pattern) { toolDisplay += `(${chalk.green(`"${toolInput.pattern}"`)})`; } else if (toolInput.command) { const cmd = toolInput.command.length > 50 ? toolInput.command.substring(0, 50) + "..." : toolInput.command; toolDisplay += `(${chalk.green(cmd)})`; } else if (toolInput.query) { const q = toolInput.query.length > 30 ? toolInput.query.substring(0, 30) + "..." 
: toolInput.query;
          toolDisplay += `(${chalk.green(`"${q}"`)})`;
        }
      }
      console.log(toolDisplay);
    }
    // A "result" message terminates the stream: either an SDK error or the
    // final generated prompt text.
    if (message.type === "result") {
      if (message.is_error) {
        throw new Error(
          (message as any).result || "Claude SDK error during prompt generation",
        );
      }
      result = (message as any).result || "";
      break;
    }
  }
} finally {
  // Clean up signal handlers
  process.off('SIGINT', signalHandler);
  process.off('SIGTERM', signalHandler);
}
if (toolCallCount > 0) {
  console.log(chalk.gray(` Analyzed codebase with ${toolCallCount} tool calls`));
}
if (!result) {
  throw new Error(
    "Failed to generate transformation prompt - no result received",
  );
}
// Replace placeholders with actual paths
return result
  .replace(/\[SOURCE PATH\]/g, sourceRepo)
  .replace(/\[TARGET PATH\]/g, targetRepo)
  .replace(/\[TARGET_PATH\]/g, targetRepo);
}

/**
 * Scaffolds the .repomirror/ directory inside the source repo: writes the
 * generated prompt to prompt.md and renders sync.sh, ralph.sh and .gitignore
 * from the packaged templates. sync.sh gets ${targetRepo} substituted and
 * both shell scripts are written with mode 0755.
 * NOTE(review): the extractor stripped generic type params here — original
 * return type was presumably Promise<void>; confirm against the repo.
 */
async function createRepoMirrorFiles(
  sourceRepo: string,
  targetRepo: string,
  optimizedPrompt: string,
): Promise {
  // Use process.cwd() as base for relative paths to ensure proper mocking in tests
  const sourceDir =
    sourceRepo && sourceRepo !== "./" ? resolve(process.cwd(), sourceRepo) : process.cwd();
  const repoMirrorDir = join(sourceDir, ".repomirror");
  // Create .repomirror directory
  await fs.mkdir(repoMirrorDir, { recursive: true });
  // Create prompt.md
  await fs.writeFile(join(repoMirrorDir, "prompt.md"), optimizedPrompt);
  // Get template directory - look for templates in the package
  const templateDir = await getTemplateDir();
  // Create sync.sh from template
  const syncTemplate = await fs.readFile(join(templateDir, "sync.sh.template"), "utf8");
  const syncScript = syncTemplate.replace(/\${targetRepo}/g, targetRepo);
  await fs.writeFile(join(repoMirrorDir, "sync.sh"), syncScript, {
    mode: 0o755,
  });
  // Create ralph.sh from template
  const ralphTemplate = await fs.readFile(join(templateDir, "ralph.sh.template"), "utf8");
  await fs.writeFile(join(repoMirrorDir, "ralph.sh"), ralphTemplate, {
    mode: 0o755,
  });
  // Create .gitignore from template
  const gitignoreTemplate = await fs.readFile(join(templateDir, "gitignore.template"), "utf8");
  await fs.writeFile(
    join(repoMirrorDir, ".gitignore"),
    gitignoreTemplate + "\n",
  );
}

/**
 * Locates the directory holding the *.template files: prefers dist/templates
 * (published-package layout) and falls back to src/templates (development and
 * test layout); throws a descriptive error if neither is accessible.
 * NOTE(review): the __dirname-relative lookup assumes this module sits two
 * directory levels below the package root — confirm for both layouts.
 */
async function getTemplateDir(): Promise {
  // First try to find templates in the package dist (for published package)
  try {
    const packageRoot = path.dirname(path.dirname(__dirname)); // From dist/commands to project root
    const distTemplateDir = join(packageRoot, "dist", "templates");
    await fs.access(distTemplateDir);
    return distTemplateDir;
  } catch {
    // Fallback to src templates (for development and tests)
    const packageRoot = path.dirname(path.dirname(__dirname)); // From src/commands to project root
    const srcTemplateDir = join(packageRoot, "src", "templates");
    try {
      await fs.access(srcTemplateDir);
      return srcTemplateDir;
    } catch {
      // If neither works, throw a more helpful error
      throw new Error(`Could not find templates in either dist/templates or src/templates. 
Package root: ${packageRoot}`);
    }
  }
}

================================================ FILE: src/commands/pull.ts ================================================

import { promises as fs } from "fs";
import { join } from "path";
import chalk from "chalk";
import ora from "ora";
import { execa } from "execa";
import yaml from "yaml";

// Shape of a single configured remote in repomirror.yaml.
interface RemoteConfig {
  url: string;
  branch: string;
  auto_push?: boolean;
}

// Parsed repomirror.yaml configuration (also duplicated in push.ts).
interface RepoMirrorConfig {
  sourceRepo: string;
  targetRepo: string;
  transformationInstructions: string;
  remotes?: { [remoteName: string]: RemoteConfig; };
  push?: { default_remote?: string; default_branch?: string; commit_prefix?: string; };
  pull?: { auto_sync?: boolean; source_remote?: string; source_branch?: string; };
}

// CLI flags accepted by `repomirror pull`.
interface PullOptions {
  sourceOnly?: boolean;
  syncAfter?: boolean;
  check?: boolean;
}

/**
 * Reads and parses ./repomirror.yaml; returns null when the file is missing
 * or unreadable (callers treat null as "project not initialized").
 */
async function loadConfig(): Promise {
  try {
    const configPath = join(process.cwd(), "repomirror.yaml");
    const configContent = await fs.readFile(configPath, "utf-8");
    return yaml.parse(configContent) as RepoMirrorConfig;
  } catch {
    return null;
  }
}

/**
 * Inspects the source repo with git: whether it is a git repo at all, whether
 * any remotes are configured, the current branch name, and whether the
 * working tree has uncommitted changes. Any git failure yields the
 * "not a git repo" shape instead of throwing.
 */
async function checkSourceRepoStatus(sourceRepo: string): Promise<{
  isGitRepo: boolean;
  hasRemotes: boolean;
  currentBranch: string | null;
  hasUncommittedChanges: boolean;
}> {
  try {
    // Check if it's a git repository
    await execa("git", ["rev-parse", "--git-dir"], { cwd: sourceRepo });
    // Get current branch
    const { stdout: branchOutput } = await execa(
      "git",
      ["branch", "--show-current"],
      { cwd: sourceRepo },
    );
    const currentBranch = branchOutput.trim();
    // Check for remotes
    const { stdout: remotesOutput } = await execa("git", ["remote"], {
      cwd: sourceRepo,
    });
    const hasRemotes = remotesOutput.trim().length > 0;
    // Check for uncommitted changes
    const { stdout: statusOutput } = await execa(
      "git",
      ["status", "--porcelain"],
      { cwd: sourceRepo },
    );
    const hasUncommittedChanges = statusOutput.trim().length > 0;
    return { isGitRepo:
false,
      hasRemotes: false,
      currentBranch: null,
      hasUncommittedChanges: false,
    };
  }
}

/**
 * Fetches remoteName and summarizes commits on remoteName/remoteBranch that
 * are not yet on the local HEAD: a boolean flag, the commit count, and up to
 * five one-line commit messages for preview. Throws on any git failure.
 */
async function getRemoteChangesSummary(
  sourceRepo: string,
  remoteName: string,
  remoteBranch: string,
): Promise<{
  hasNewCommits: boolean;
  commitCount: number;
  commitMessages: string[];
}> {
  try {
    // Fetch latest changes from remote
    await execa("git", ["fetch", remoteName], { cwd: sourceRepo });
    // Check for new commits
    const { stdout: countOutput } = await execa(
      "git",
      ["rev-list", "--count", `HEAD..${remoteName}/${remoteBranch}`],
      { cwd: sourceRepo },
    );
    // `|| 0` already maps a NaN parse to 0, so the isNaN check below is a
    // belt-and-braces guard.
    const commitCount = parseInt(countOutput.trim(), 10) || 0;
    if (commitCount === 0 || isNaN(commitCount)) {
      return {
        hasNewCommits: false,
        commitCount: 0,
        commitMessages: [],
      };
    }
    // Get commit messages for preview
    const { stdout: logOutput } = await execa(
      "git",
      [
        "log",
        "--oneline",
        "--no-merges",
        `-${Math.min(commitCount, 5)}`, // Show up to 5 commits
        `HEAD..${remoteName}/${remoteBranch}`,
      ],
      { cwd: sourceRepo },
    );
    const commitMessages = logOutput.trim().split("\n").filter(Boolean);
    return {
      hasNewCommits: true,
      commitCount,
      commitMessages,
    };
  } catch (error) {
    throw new Error(`Failed to check for remote changes: ${error}`);
  }
}

/**
 * Runs `git pull <remote> <branch>` in the source repo and reports success
 * plus whether merge conflicts were detected (by scanning stdout/stderr for
 * CONFLICT markers). On failure, prints targeted hints for authentication
 * problems and missing-branch errors, then rethrows the original error.
 */
async function pullSourceChanges(
  sourceRepo: string,
  remoteName: string,
  remoteBranch: string,
): Promise<{ success: boolean; conflictsDetected: boolean }> {
  const spinner = ora(
    `Pulling changes from ${remoteName}/${remoteBranch}...`,
  ).start();
  try {
    // Attempt to pull changes
    const { stdout, stderr } = await execa(
      "git",
      ["pull", remoteName, remoteBranch],
      { cwd: sourceRepo },
    );
    // Check for merge conflicts
    const conflictsDetected =
      stderr.includes("CONFLICT") ||
      stdout.includes("CONFLICT") ||
      stderr.includes("Automatic merge failed");
    if (conflictsDetected) {
      spinner.fail("Pull completed with merge conflicts");
      return { success: false, conflictsDetected: true };
    }
    spinner.succeed(`Successfully pulled from ${remoteName}/${remoteBranch}`);
    // Show pull summary if there's useful information
    if (stdout && !stdout.includes("Already up to date")) {
      console.log(chalk.gray("Pull summary:"));
      console.log(chalk.gray(stdout.split("\n").slice(0, 3).join("\n")));
    }
    return { success: true, conflictsDetected: false };
  } catch (error) {
    spinner.fail(`Failed to pull from ${remoteName}/${remoteBranch}`);
    if (error instanceof Error) {
      const errorMessage = error.message.toLowerCase();
      if (
        errorMessage.includes("authentication failed") ||
        errorMessage.includes("permission denied")
      ) {
        console.log(chalk.yellow("\n🔐 Authentication Issue Detected:"));
        console.log(
          chalk.gray("• For HTTPS: Check your GitHub token or credentials"),
        );
        console.log(
          chalk.gray(
            "• For SSH: Ensure your SSH key is added to your GitHub account",
          ),
        );
      } else if (errorMessage.includes("couldn't find remote ref")) {
        console.log(
          chalk.yellow(
            `\n🌿 Branch '${remoteBranch}' not found on remote '${remoteName}'`,
          ),
        );
        console.log(
          chalk.gray("• Check the branch name in your repomirror.yaml"),
        );
        console.log(
          chalk.gray("• List available branches with: git ls-remote --heads"),
        );
      }
    }
    throw error;
  }
}

/**
 * Runs the generated sync scripts in .repomirror/: ralph.sh (continuous loop)
 * when syncAfter is true, otherwise sync.sh (single pass). Forwards
 * SIGINT/SIGTERM to the child process and treats a user Ctrl+C (SIGINT) as a
 * clean stop rather than a failure.
 * NOTE(review): original return type was presumably Promise<void>; the
 * extractor stripped the generic parameter.
 */
async function triggerSync(syncAfter: boolean): Promise {
  const syncScript = join(process.cwd(), ".repomirror", "sync.sh");
  const ralphScript = join(process.cwd(), ".repomirror", "ralph.sh");
  try {
    if (syncAfter) {
      // Check if ralph.sh exists for continuous sync
      await fs.access(ralphScript);
      console.log(chalk.cyan("\n🔄 Starting continuous sync (ralph.sh)..."));
      console.log(chalk.yellow("Press Ctrl+C to stop"));
      const subprocess = execa("bash", [ralphScript], {
        stdio: "inherit",
        cwd: process.cwd(),
      });
      // Handle graceful shutdown for subprocess
      const signalHandler = () => {
        console.log(chalk.yellow("\nStopping continuous sync..."));
        subprocess.kill("SIGINT");
      };
      process.on('SIGINT', signalHandler);
      process.on('SIGTERM', signalHandler);
      try {
        await subprocess;
      } finally {
        // Clean up signal handlers
        process.off('SIGINT', signalHandler);
        process.off('SIGTERM', signalHandler);
      }
    } else {
      // Check if sync.sh exists for single sync
      await fs.access(syncScript);
      console.log(chalk.cyan("\n🔄 Running single sync (sync.sh)..."));
      const subprocess = execa("bash", [syncScript], {
        stdio: "inherit",
        cwd: process.cwd(),
      });
      // Handle graceful shutdown
      const signalHandler = () => {
        console.log(chalk.yellow("\nStopping sync..."));
        subprocess.kill("SIGINT");
      };
      process.on('SIGINT', signalHandler);
      process.on('SIGTERM', signalHandler);
      try {
        await subprocess;
        console.log(chalk.green("✅ Sync completed"));
      } finally {
        // Clean up signal handlers
        process.off('SIGINT', signalHandler);
        process.off('SIGTERM', signalHandler);
      }
    }
  } catch (error) {
    if (error instanceof Error && (error as any).signal === "SIGINT") {
      console.log(chalk.yellow("\nStopped by user"));
    } else {
      throw new Error(`Sync failed: ${error}`);
    }
  }
}

/**
 * Core pull workflow: validates the source repo (exists, is a git repo, has
 * remotes; warns on uncommitted changes), previews incoming commits from the
 * configured source remote/branch (defaults upstream/main), pulls them, and
 * optionally triggers a follow-up sync. Exits the process with code 1 on
 * unrecoverable validation errors or unresolved merge conflicts.
 */
async function performPull(
  config: RepoMirrorConfig,
  options: PullOptions,
): Promise {
  const { sourceRepo } = config;
  // Verify source directory exists
  try {
    await fs.access(sourceRepo);
  } catch {
    console.error(
      chalk.red(`Error: Source directory ${sourceRepo} does not exist`),
    );
    process.exit(1);
  }
  // Check source repository status
  const repoStatus = await checkSourceRepoStatus(sourceRepo);
  if (!repoStatus.isGitRepo) {
    console.error(
      chalk.red(
        `Error: Source directory ${sourceRepo} is not a git repository`,
      ),
    );
    process.exit(1);
  }
  if (!repoStatus.hasRemotes) {
    console.error(
      chalk.red("Error: Source repository has no configured remotes"),
    );
    console.log(chalk.gray("Add a remote with: git remote add "));
    process.exit(1);
  }
  if (repoStatus.hasUncommittedChanges) {
    console.log(chalk.yellow("⚠️ Source repository has uncommitted changes"));
    console.log(
      chalk.gray("Consider committing or stashing changes before pulling"),
    );
    console.log();
  }
  // Determine remote and branch to pull from
  const remoteName = config.pull?.source_remote || "upstream";
  const remoteBranch = config.pull?.source_branch || "main";
  console.log(
    chalk.cyan(`📡 Checking for changes from 
${remoteName}/${remoteBranch}...`),
  );
  try {
    // Get summary of remote changes
    const changesSummary = await getRemoteChangesSummary(
      sourceRepo,
      remoteName,
      remoteBranch,
    );
    if (!changesSummary.hasNewCommits) {
      console.log(chalk.green("✅ Source repository is already up to date"));
      return;
    }
    // Show preview of incoming changes
    console.log(
      chalk.cyan(`\n📥 ${changesSummary.commitCount} new commit(s) available:`),
    );
    changesSummary.commitMessages.forEach((message) => {
      console.log(chalk.gray(` • ${message}`));
    });
    if (changesSummary.commitCount > changesSummary.commitMessages.length) {
      const remaining =
        changesSummary.commitCount - changesSummary.commitMessages.length;
      console.log(chalk.gray(` ... and ${remaining} more commit(s)`));
    }
    console.log();
    // If this is just a check, return here
    if (options.check) {
      console.log(
        chalk.blue(
          "🔍 Check complete - use 'npx repomirror pull' to apply changes",
        ),
      );
      return;
    }
    // Pull the changes
    const pullResult = await pullSourceChanges(
      sourceRepo,
      remoteName,
      remoteBranch,
    );
    if (!pullResult.success) {
      if (pullResult.conflictsDetected) {
        console.log(chalk.red("\n❌ Merge conflicts detected"));
        console.log(chalk.yellow("Please resolve conflicts manually:"));
        console.log(chalk.gray("1. Navigate to source repository"));
        console.log(chalk.gray("2. Resolve conflicts in affected files"));
        console.log(chalk.gray("3. Run: git add . && git commit"));
        console.log(chalk.gray("4. 
Re-run: npx repomirror pull"));
        process.exit(1);
      }
      return;
    }
    // Trigger sync if requested or configured
    const shouldSync =
      options.syncAfter || (config.pull?.auto_sync && !options.sourceOnly);
    if (shouldSync && !options.sourceOnly) {
      await triggerSync(!!options.syncAfter);
    } else if (!options.sourceOnly) {
      console.log(chalk.blue("\n💡 Source changes pulled successfully"));
      console.log(
        chalk.gray("Run 'npx repomirror sync' to apply transformations"),
      );
    }
  } catch (error) {
    throw new Error(`Pull operation failed: ${error}`);
  }
}

/**
 * CLI entry point for `repomirror pull`: loads repomirror.yaml (exiting with
 * a hint when missing) and delegates to performPull, converting any failure
 * into a red error message and exit code 1.
 */
export async function pull(options: PullOptions = {}): Promise {
  const config = await loadConfig();
  if (!config) {
    console.error(
      chalk.red(
        "Error: repomirror.yaml not found. Run 'npx repomirror init' first.",
      ),
    );
    process.exit(1);
  }
  try {
    await performPull(config, options);
  } catch (error) {
    console.error(
      chalk.red(
        `Pull failed: ${error instanceof Error ? error.message : String(error)}`,
      ),
    );
    process.exit(1);
  }
}

================================================ FILE: src/commands/push.ts ================================================

import { promises as fs } from "fs";
import { join } from "path";
import chalk from "chalk";
import ora from "ora";
import { execa } from "execa";
import yaml from "yaml";

// Shape of a single configured remote in repomirror.yaml.
interface RemoteConfig {
  url: string;
  branch: string;
  auto_push?: boolean;
}

// Parsed repomirror.yaml configuration (duplicated from pull.ts).
interface RepoMirrorConfig {
  sourceRepo: string;
  targetRepo: string;
  transformationInstructions: string;
  remotes?: { [remoteName: string]: RemoteConfig; };
  push?: { default_remote?: string; default_branch?: string; commit_prefix?: string; };
  pull?: { auto_sync?: boolean; source_remote?: string; source_branch?: string; };
}

// CLI flags accepted by `repomirror push`.
interface PushOptions {
  remote?: string;
  branch?: string;
  all?: boolean;
  dryRun?: boolean;
}

/**
 * Reads and parses ./repomirror.yaml; returns null when the file is missing
 * or unreadable (duplicated from pull.ts).
 */
async function loadConfig(): Promise {
  try {
    const configPath = join(process.cwd(), "repomirror.yaml");
    const configContent = await fs.readFile(configPath, "utf-8");
    return yaml.parse(configContent) as RepoMirrorConfig;
  } catch {
    return null;
  }
}
// Gathers the working-tree state of the target repo via three git queries:
// staged (diff --cached), unstaged (diff), and untracked (ls-files --others).
// Note that untracked files are folded into `unstagedFiles` in the result.
async function getGitStatus(targetRepo: string): Promise<{
  hasChanges: boolean;
  stagedFiles: string[];
  unstagedFiles: string[];
}> {
  try {
    // Check for staged changes
    const { stdout: stagedOutput } = await execa(
      "git",
      ["diff", "--cached", "--name-only"],
      { cwd: targetRepo },
    );
    // trim() first guards the empty-output case, where split("\n")
    // would otherwise yield [""].
    const stagedFiles = stagedOutput.trim()
      ? stagedOutput.trim().split("\n")
      : [];

    // Check for unstaged changes
    const { stdout: unstagedOutput } = await execa(
      "git",
      ["diff", "--name-only"],
      { cwd: targetRepo },
    );
    const unstagedFiles = unstagedOutput.trim()
      ? unstagedOutput.trim().split("\n")
      : [];

    // Check for untracked files
    const { stdout: untrackedOutput } = await execa(
      "git",
      ["ls-files", "--others", "--exclude-standard"],
      { cwd: targetRepo },
    );
    const untrackedFiles = untrackedOutput.trim()
      ? untrackedOutput.trim().split("\n")
      : [];

    const hasChanges =
      stagedFiles.length > 0 ||
      unstagedFiles.length > 0 ||
      untrackedFiles.length > 0;

    return {
      hasChanges,
      stagedFiles,
      // Untracked files are reported together with unstaged modifications.
      unstagedFiles: [...unstagedFiles, ...untrackedFiles],
    };
  } catch (error) {
    throw new Error(`Failed to check git status: ${error}`);
  }
}

// Returns the short (7-char) HEAD hash of the source repo, or null when the
// source directory is not a git repository (callers then omit the
// "(source: …)" suffix from the commit message).
// NOTE(review): the return type's generic argument appears stripped by the
// extraction — presumably Promise<string | null>; confirm against the repo.
async function getSourceCommitHash(sourceRepo: string): Promise {
  try {
    const { stdout } = await execa("git", ["rev-parse", "HEAD"], {
      cwd: sourceRepo,
    });
    return stdout.trim().substring(0, 7); // Short hash
  } catch {
    return null; // Source repo might not be a git repository
  }
}

// Builds the commit message for target-repo commits: a configurable prefix
// ("[repomirror]" by default), a one-line summary derived from the
// transformation instructions, and optionally the source HEAD short hash.
async function generateCommitMessage(
  config: RepoMirrorConfig,
  sourceCommitHash: string | null,
): Promise {
  const prefix = config.push?.commit_prefix || "[repomirror]";
  const instructions = config.transformationInstructions;

  // Create a concise summary of transformation
  let summary = "Apply transformations";
  if (instructions.length < 80) {
    // Short instructions are used verbatim as the summary.
    summary = instructions;
  } else {
    // Extract key transformation type from instructions
    const lowerInstructions = instructions.toLowerCase();
    // NOTE(review): includes("ts") also matches substrings such as "tests"
    // or "scripts", so this branch can misclassify — confirm intent.
    if (
      lowerInstructions.includes("typescript") ||
      lowerInstructions.includes("ts")
    ) {
      summary = "Convert to TypeScript";
    } else if (lowerInstructions.includes("python")) {
      summary = "Convert to Python";
    } else if (lowerInstructions.includes("react")) {
      summary = "Convert to React";
    } else if (lowerInstructions.includes("vue")) {
      summary = "Convert to Vue";
    } else {
      summary = "Apply code transformations";
    }
  }

  let commitMessage = `${prefix} ${summary}`;
  if (sourceCommitHash) {
    commitMessage += ` (source: ${sourceCommitHash})`;
  }
  return commitMessage;
}

// Stages every change in the target repo ("git add .") and commits it with
// the supplied message, reporting progress via ora spinners. Throws on any
// git failure so the caller can abort the push.
async function stageAndCommitChanges(
  targetRepo: string,
  commitMessage: string,
): Promise {
  const spinner = ora("Staging changes...").start();

  try {
    // Add all changes to staging
    await execa("git", ["add", "."], { cwd: targetRepo });
    spinner.succeed("Staged all changes");

    // Create commit
    const commitSpinner = ora("Creating commit...").start();
    await execa("git", ["commit", "-m", commitMessage], { cwd: targetRepo });
    commitSpinner.succeed("Created commit successfully");
  } catch (error) {
    spinner.fail("Failed to stage and commit changes");
    throw new Error(`Git commit failed: ${error}`);
  }
}

// Pushes the target repo to a single remote/branch; with dryRun=true it runs
// "git push --dry-run" instead. (The error-handling tail of this function
// continues in the next chunk of the file.)
async function pushToRemote(
  targetRepo: string,
  remoteName: string,
  remoteBranch: string,
  dryRun: boolean = false,
): Promise {
  const action = dryRun ? "dry-run push to" : "push to";
  const spinner = ora(
    `Starting ${action} ${remoteName}/${remoteBranch}...`,
  ).start();

  try {
    const args = ["push"];
    if (dryRun) {
      args.push("--dry-run");
    }
    args.push(remoteName, remoteBranch);

    const { stdout, stderr } = await execa("git", args, {
      cwd: targetRepo,
      timeout: 60000, // 60 second timeout
    });

    if (dryRun) {
      spinner.succeed(`Dry run completed for ${remoteName}/${remoteBranch}`);
      if (stdout) {
        console.log(chalk.gray("Dry run output:"));
        console.log(chalk.gray(stdout));
      }
    } else {
      spinner.succeed(`Successfully pushed to ${remoteName}/${remoteBranch}`);
    }

    // Show any informational output from git push
    // (git writes progress/info to stderr; this crude substring filter keeps
    // obvious errors out of the "informational" path)
    if (stderr && !stderr.includes("error") && !stderr.includes("fatal")) {
      console.log(chalk.gray(stderr));
    }
  } catch (error) {
    const actionText = dryRun ?
"Dry run failed" : "Push failed";
    spinner.fail(`${actionText} for ${remoteName}/${remoteBranch}`);

    if (error instanceof Error) {
      // Check for common authentication issues
      const errorMessage = error.message.toLowerCase();
      if (
        errorMessage.includes("authentication failed") ||
        errorMessage.includes("permission denied")
      ) {
        console.log(chalk.yellow("\n🔐 Authentication Issue Detected:"));
        console.log(
          chalk.gray("• For HTTPS: Check your GitHub token or credentials"),
        );
        console.log(
          chalk.gray(
            "• For SSH: Ensure your SSH key is added to your GitHub account",
          ),
        );
        console.log(
          chalk.gray("• Test with: git push from the target directory"),
        );
      } else if (errorMessage.includes("timeout")) {
        console.log(
          chalk.yellow("\n⏰ Push timed out - check your network connection"),
        );
      } else if (errorMessage.includes("rejected")) {
        console.log(
          chalk.yellow("\n🚫 Push rejected - you may need to pull first"),
        );
        console.log(chalk.gray("• Try: git pull from the target directory"));
      }
    }
    // Re-throw so the caller can record this remote as failed.
    throw error;
  }
}

// Orchestrates a push: validates the target repo, inspects git status,
// commits pending changes (unless --dry-run), resolves which remotes to push
// to (--all, explicit --remote, or the configured default), then pushes to
// each remote and aggregates failures instead of aborting on the first one.
async function performPush(
  config: RepoMirrorConfig,
  options: PushOptions,
): Promise {
  const { targetRepo } = config;

  // Verify target directory exists and is a git repository
  try {
    await fs.access(targetRepo);
    await execa("git", ["rev-parse", "--git-dir"], { cwd: targetRepo });
  } catch {
    console.error(
      chalk.red(
        `Error: Target directory ${targetRepo} is not a valid git repository`,
      ),
    );
    process.exit(1);
  }

  // Check git status
  const gitStatus = await getGitStatus(targetRepo);
  if (!gitStatus.hasChanges) {
    console.log(chalk.yellow("No changes to commit"));
    return;
  }

  // Show what changes will be committed
  console.log(chalk.cyan("Changes to be pushed:"));
  if (gitStatus.stagedFiles.length > 0) {
    console.log(chalk.green(" Staged files:"));
    gitStatus.stagedFiles.forEach((file) =>
      console.log(chalk.green(` + ${file}`)),
    );
  }
  if (gitStatus.unstagedFiles.length > 0) {
    console.log(chalk.yellow(" Modified/untracked files:"));
    gitStatus.unstagedFiles.forEach((file) =>
      console.log(chalk.yellow(` M ${file}`)),
    );
  }
  console.log();

  // Get source commit hash for commit message
  const sourceCommitHash = await getSourceCommitHash(config.sourceRepo);

  // Generate commit message
  const commitMessage = await generateCommitMessage(config, sourceCommitHash);
  console.log(chalk.gray(`Commit message: ${commitMessage}\n`));

  // If dry run, skip committing
  // NOTE(review): on --dry-run nothing is committed, yet the dry-run push
  // below still runs against the current HEAD — confirm that is intended.
  if (!options.dryRun) {
    // Stage and commit changes
    await stageAndCommitChanges(targetRepo, commitMessage);
  }

  // Determine which remotes to push to
  const remotesToPush: Array<{ name: string; branch: string }> = [];

  if (options.all) {
    // Push to all configured remotes
    if (config.remotes) {
      Object.entries(config.remotes).forEach(([name, remote]) => {
        remotesToPush.push({ name, branch: remote.branch });
      });
    }
  } else {
    // Push to specific or default remote
    const remoteName = options.remote || config.push?.default_remote;
    const remoteBranch = options.branch || config.push?.default_branch;

    if (!remoteName) {
      console.error(
        chalk.red(
          "Error: No remote specified and no default remote configured",
        ),
      );
      console.log(
        chalk.gray(
          "Use --remote or add a default remote with: npx repomirror remote add",
        ),
      );
      process.exit(1);
    }

    if (!config.remotes?.[remoteName]) {
      console.error(chalk.red(`Error: Remote '${remoteName}' not found`));
      console.log(
        chalk.gray("List configured remotes with: npx repomirror remote list"),
      );
      process.exit(1);
    }

    // An explicit CLI/default branch overrides the remote's configured branch.
    const branch = remoteBranch || config.remotes[remoteName].branch;
    remotesToPush.push({ name: remoteName, branch });
  }

  if (remotesToPush.length === 0) {
    console.log(chalk.yellow("No remotes configured for push"));
    console.log(
      chalk.gray("Add a remote with: npx repomirror remote add "),
    );
    return;
  }

  // Push to each remote, collecting failures so every remote is attempted.
  const errors: string[] = [];
  for (const remote of remotesToPush) {
    try {
      await pushToRemote(
        targetRepo,
        remote.name,
        remote.branch,
        options.dryRun,
      );
    } catch (error) {
      errors.push(`${remote.name}/${remote.branch}: ${error}`);
    }
  }

  // Report results
  if (errors.length === 0) {
    const action =
options.dryRun ? "Dry run completed" : "All pushes completed successfully";
    console.log(chalk.green(`\n✅ ${action}`));
  } else {
    console.log(chalk.red("\n❌ Some pushes failed:"));
    errors.forEach((error) => console.log(chalk.red(` • ${error}`)));
    process.exit(1);
  }
}

// CLI entry point for `repomirror push`: loads repomirror.yaml, verifies at
// least one remote is configured, then delegates to performPush. Exits with
// status 1 on any failure.
export async function push(options: PushOptions = {}): Promise {
  const config = await loadConfig();
  if (!config) {
    console.error(
      chalk.red(
        "Error: repomirror.yaml not found. Run 'npx repomirror init' first.",
      ),
    );
    process.exit(1);
  }

  if (!config.remotes || Object.keys(config.remotes).length === 0) {
    console.error(chalk.red("Error: No remotes configured"));
    console.log(
      chalk.gray("Add a remote with: npx repomirror remote add "),
    );
    process.exit(1);
  }

  try {
    await performPush(config, options);
  } catch (error) {
    console.error(
      chalk.red(
        `Push failed: ${error instanceof Error ? error.message : String(error)}`,
      ),
    );
    process.exit(1);
  }
}

================================================ FILE: src/commands/remote.ts ================================================

import { promises as fs } from "fs";
import { join } from "path";
import chalk from "chalk";
import ora from "ora";
import { execa } from "execa";
import yaml from "yaml";

// Per-remote settings stored under `remotes:` in repomirror.yaml.
interface RemoteConfig {
  url: string;
  branch: string;
  auto_push?: boolean;
}

// Shape of repomirror.yaml as read/written by the remote command.
interface RepoMirrorConfig {
  sourceRepo: string;
  targetRepo: string;
  transformationInstructions: string;
  remotes?: {
    [remoteName: string]: RemoteConfig;
  };
  push?: {
    default_remote?: string;
    default_branch?: string;
    commit_prefix?: string;
  };
  pull?: {
    auto_sync?: boolean;
    source_remote?: string;
    source_branch?: string;
  };
}

// Reads repomirror.yaml from the current working directory; returns null
// when the file is missing or unreadable.
async function loadConfig(): Promise {
  try {
    const configPath = join(process.cwd(), "repomirror.yaml");
    const configContent = await fs.readFile(configPath, "utf-8");
    return yaml.parse(configContent) as RepoMirrorConfig;
  } catch {
    return null;
  }
}

// Serializes the config back to repomirror.yaml.
// (Function body continues in the next chunk of the file.)
async function saveConfig(config: RepoMirrorConfig): Promise {
  const configPath = join(process.cwd(), "repomirror.yaml");
  const configContent
= yaml.stringify(config); await fs.writeFile(configPath, configContent, "utf-8"); } async function validateRemoteUrl(url: string): Promise { try { // Basic URL validation new URL(url); // Check if it's a valid git URL (basic patterns) const gitUrlPattern = /^(https?:\/\/|git@).+\.git$/i; const githubPattern = /^https?:\/\/github\.com\/.+\/.+/i; return gitUrlPattern.test(url) || githubPattern.test(url); } catch { return false; } } export async function remoteAdd( name: string, url: string, branch = "main", ): Promise { if (!name || !url) { console.error(chalk.red("Error: Remote name and URL are required")); process.exit(1); } // Validate remote URL if (!(await validateRemoteUrl(url))) { console.error(chalk.red(`Error: Invalid git URL: ${url}`)); console.log( chalk.gray( "Expected format: https://github.com/user/repo.git or git@github.com:user/repo.git", ), ); process.exit(1); } const config = await loadConfig(); if (!config) { console.error( chalk.red( "Error: repomirror.yaml not found. Run 'npx repomirror init' first.", ), ); process.exit(1); } // Initialize remotes object if it doesn't exist if (!config.remotes) { config.remotes = {}; } // Check if remote already exists if (config.remotes[name]) { console.error(chalk.red(`Error: Remote '${name}' already exists`)); console.log(chalk.gray(`Current URL: ${config.remotes[name].url}`)); console.log( chalk.gray( "Use 'npx repomirror remote remove ' to remove it first", ), ); process.exit(1); } // Test remote accessibility (optional, non-blocking) const spinner = ora(`Testing remote accessibility for ${name}...`).start(); try { // Try to ls-remote to verify the URL is accessible await execa("git", ["ls-remote", "--heads", url], { timeout: 10000 }); spinner.succeed(`Remote ${name} is accessible`); } catch (error) { spinner.warn(`Warning: Could not verify remote accessibility`); console.log( chalk.gray(` This might be due to authentication or network issues`), ); console.log( chalk.gray( ` Remote will be added anyway - 
ensure you have proper access`, ), ); } // Add remote to configuration config.remotes[name] = { url, branch, auto_push: false, }; // Set as default remote if it's the first one if (!config.push) { config.push = {}; } if (!config.push.default_remote) { config.push.default_remote = name; } if (!config.push.default_branch) { config.push.default_branch = branch; } if (!config.push.commit_prefix) { config.push.commit_prefix = "[repomirror]"; } await saveConfig(config); console.log(chalk.green(`✅ Added remote '${name}'`)); console.log(chalk.gray(` URL: ${url}`)); console.log(chalk.gray(` Branch: ${branch}`)); if (config.push.default_remote === name) { console.log(chalk.gray(` Set as default remote for push operations`)); } } export async function remoteList(): Promise { const config = await loadConfig(); if (!config) { console.error( chalk.red( "Error: repomirror.yaml not found. Run 'npx repomirror init' first.", ), ); process.exit(1); } if (!config.remotes || Object.keys(config.remotes).length === 0) { console.log(chalk.yellow("No remotes configured")); console.log( chalk.gray("Add a remote with: npx repomirror remote add "), ); return; } console.log(chalk.cyan("Configured remotes:")); console.log(); Object.entries(config.remotes).forEach(([name, remote]) => { const isDefault = config.push?.default_remote === name; const prefix = isDefault ? chalk.green("* ") : " "; console.log(`${prefix}${chalk.bold(name)}`); console.log(` URL: ${remote.url}`); console.log(` Branch: ${remote.branch}`); console.log(` Auto-push: ${remote.auto_push ? 
"enabled" : "disabled"}`); if (isDefault) { console.log(chalk.gray(" (default remote)")); } console.log(); }); if (config.push) { console.log(chalk.gray("Push settings:")); console.log( chalk.gray(` Default remote: ${config.push.default_remote || "none"}`), ); console.log( chalk.gray(` Default branch: ${config.push.default_branch || "none"}`), ); console.log( chalk.gray( ` Commit prefix: ${config.push.commit_prefix || "[repomirror]"}`, ), ); } } export async function remoteRemove(name: string): Promise { if (!name) { console.error(chalk.red("Error: Remote name is required")); process.exit(1); } const config = await loadConfig(); if (!config) { console.error( chalk.red( "Error: repomirror.yaml not found. Run 'npx repomirror init' first.", ), ); process.exit(1); } if (!config.remotes || !config.remotes[name]) { console.error(chalk.red(`Error: Remote '${name}' not found`)); console.log(chalk.gray("List remotes with: npx repomirror remote list")); process.exit(1); } const remote = config.remotes[name]; delete config.remotes[name]; // Update default remote if we're removing it if (config.push?.default_remote === name) { const remainingRemotes = Object.keys(config.remotes); if (remainingRemotes.length > 0) { config.push.default_remote = remainingRemotes[0]; console.log( chalk.yellow(`Updated default remote to '${remainingRemotes[0]}'`), ); } else { delete config.push.default_remote; console.log(chalk.yellow("No default remote (no remotes remaining)")); } } await saveConfig(config); console.log(chalk.green(`✅ Removed remote '${name}'`)); console.log(chalk.gray(` URL: ${remote.url}`)); } export async function remote(action: string, ...args: string[]): Promise { switch (action) { case "add": if (args.length < 2) { console.error( chalk.red("Usage: npx repomirror remote add [branch]"), ); process.exit(1); } await remoteAdd(args[0], args[1], args[2]); break; case "list": await remoteList(); break; case "remove": case "rm": if (args.length < 1) { console.error(chalk.red("Usage: 
npx repomirror remote remove ")); process.exit(1); } await remoteRemove(args[0]); break; default: console.error(chalk.red(`Unknown remote action: ${action}`)); console.log(chalk.gray("Available actions:")); console.log( chalk.gray(" add [branch] - Add a remote repository"), ); console.log( chalk.gray(" list - List configured remotes"), ); console.log(chalk.gray(" remove - Remove a remote")); process.exit(1); } } ================================================ FILE: src/commands/setup-github-pr-sync.ts ================================================ import { promises as fs } from "fs"; import { join, resolve } from "path"; import inquirer from "inquirer"; import chalk from "chalk"; import ora from "ora"; import yaml from "yaml"; interface SetupGithubPrSyncOptions { targetRepo?: string; timesToLoop?: number; overwrite?: boolean; } interface RemoteConfig { url: string; branch: string; auto_push?: boolean; } interface RepoMirrorConfig { sourceRepo: string; targetRepo: string; transformationInstructions: string; remotes?: { [remoteName: string]: RemoteConfig; }; push?: { default_remote?: string; default_branch?: string; commit_prefix?: string; }; pull?: { auto_sync?: boolean; source_remote?: string; source_branch?: string; }; "github-pr-sync"?: { targetRepo?: string; timesToLoop?: number; }; } const DEFAULT_WORKFLOW = `name: RepoMirror PR Sync on: workflow_dispatch: # Allow manual trigger push: branches: [ main ] paths: - '.repomirror/**' - 'repomirror.yaml' jobs: sync: runs-on: ubuntu-latest steps: - name: Checkout source repository uses: actions/checkout@v4 with: path: source - name: Checkout target repository uses: actions/checkout@v4 with: repository: {TARGET_REPO} token: $\{{ secrets.GITHUB_TOKEN }} path: target - name: Setup Node.js uses: actions/setup-node@v4 with: node-version: '20' - name: Install repomirror run: npm install -g repomirror - name: Setup Claude Code env: ANTHROPIC_API_KEY: $\{{ secrets.ANTHROPIC_API_KEY }} run: | # Setup Claude Code with API 
key echo "Setting up Claude Code..." # Configure Claude Code authentication for CI mkdir -p ~/.config/claude echo "api_key = \\"$ANTHROPIC_API_KEY\\"" > ~/.config/claude/config - name: Run RepoMirror sync loop working-directory: source env: SKIP_CLAUDE_TEST: true # Skip interactive Claude test in CI run: | # Run the sync-one command in a loop {TIMES_TO_LOOP} times for i in $(seq 1 {TIMES_TO_LOOP}); do echo "=== Sync iteration $i of {TIMES_TO_LOOP} ===" npx repomirror sync-one --auto-push || echo "Sync iteration $i failed, continuing..." if [ $i -lt {TIMES_TO_LOOP} ]; then echo "Sleeping 30 seconds before next iteration..." sleep 30 fi done - name: Push final changes to target working-directory: target run: | git config user.name "GitHub Actions" git config user.email "actions@github.com" if [ -n "$(git status --porcelain)" ]; then git add -A git commit -m "Automated PR sync from source repository [$(date)]" git push else echo "No changes to push" fi `; async function loadExistingConfig(): Promise | null> { try { const configPath = join(process.cwd(), "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); return yaml.parse(configContent) as RepoMirrorConfig; } catch { return null; } } async function saveConfig(config: RepoMirrorConfig): Promise { const configPath = join(process.cwd(), "repomirror.yaml"); const configContent = yaml.stringify(config); await fs.writeFile(configPath, configContent, "utf-8"); } async function workflowExists(): Promise { try { const workflowPath = join(process.cwd(), ".github", "workflows", "repomirror.yml"); await fs.access(workflowPath); return true; } catch { return false; } } export async function setupGithubPrSync(options?: SetupGithubPrSyncOptions): Promise { console.log( chalk.cyan("I'll help you set up a github actions workflow that will run the sync-one command on every pr merge\n") ); // Check if repomirror.yaml exists const existingConfig = await loadExistingConfig(); if (!existingConfig) { 
console.error(chalk.red("Error: repomirror.yaml not found")); console.log(chalk.yellow("Please run 'npx repomirror init' first")); process.exit(1); } // Load existing GitHub PR sync defaults from config const existingGithubPrSyncConfig = existingConfig["github-pr-sync"] || {}; // Merge CLI options, existing config, and defaults const defaults = { targetRepo: options?.targetRepo || existingGithubPrSyncConfig.targetRepo || "", timesToLoop: options?.timesToLoop || existingGithubPrSyncConfig.timesToLoop || 3, }; // Check if workflow already exists const exists = await workflowExists(); if (exists && !options?.overwrite) { const { shouldOverwrite } = await inquirer.prompt([ { type: "confirm", name: "shouldOverwrite", message: "GitHub Actions workflow already exists. Do you want to overwrite it?", default: false, }, ]); if (!shouldOverwrite) { console.log(chalk.yellow("Exiting without making changes.")); process.exit(0); } } // Prompt for configuration const answers = await inquirer.prompt([ { type: "input", name: "targetRepo", message: "Target repo, e.g. 
repomirrorhq/repomirror:", default: defaults.targetRepo, when: !options?.targetRepo, validate: (input) => { if (!input || !input.includes("/")) { return "Please provide the GitHub repository in owner/repo format"; } return true; }, }, { type: "input", name: "timesToLoop", message: "Times to loop (advanced, recommend 3):", default: defaults.timesToLoop.toString(), when: !options?.timesToLoop, validate: (input) => { const num = parseInt(input); if (isNaN(num) || num < 1 || num > 10) { return "Please enter a number between 1 and 10"; } return true; }, filter: (input) => parseInt(input), }, ]); // Merge final configuration const finalConfig = { targetRepo: options?.targetRepo || answers.targetRepo, timesToLoop: options?.timesToLoop || answers.timesToLoop, }; // Update and save repomirror.yaml with GitHub PR sync settings const updatedConfig: RepoMirrorConfig = { sourceRepo: existingConfig.sourceRepo || "./", targetRepo: existingConfig.targetRepo || "../transformed", transformationInstructions: existingConfig.transformationInstructions || "transform the repository", ...existingConfig, "github-pr-sync": { targetRepo: finalConfig.targetRepo, timesToLoop: finalConfig.timesToLoop, }, }; await saveConfig(updatedConfig); console.log(chalk.green("✅ Updated repomirror.yaml with GitHub PR sync settings")); // Create .github/workflows directory if it doesn't exist const workflowDir = join(process.cwd(), ".github", "workflows"); await fs.mkdir(workflowDir, { recursive: true }); // Generate workflow content const workflowContent = DEFAULT_WORKFLOW .replace(/{TARGET_REPO}/g, finalConfig.targetRepo) .replace(/{TIMES_TO_LOOP}/g, finalConfig.timesToLoop.toString()); // Write workflow file const workflowPath = join(workflowDir, "repomirror.yml"); const spinner = ora("Creating GitHub Actions workflow...").start(); try { await fs.writeFile(workflowPath, workflowContent); spinner.succeed("GitHub Actions workflow created"); console.log(chalk.green(`\n✅ Workflow created at 
${workflowPath}`)); console.log(chalk.cyan("\nNext steps:")); console.log(chalk.white("• push to github")); console.log(chalk.white("• add secrets for ANTHROPIC_API_KEY and GITHUB_TOKEN, where GITHUB_TOKEN has read/push access to the target repo")); console.log(chalk.yellow("\n⚠️ Important:")); console.log(chalk.yellow("Make sure to set up the required GitHub secrets:")); console.log(chalk.gray(" - ANTHROPIC_API_KEY: Your Anthropic API key for Claude")); console.log(chalk.gray(" - GITHUB_TOKEN: Already provided by GitHub Actions (ensure repo permissions)")); } catch (error) { spinner.fail("Failed to create workflow"); console.error(chalk.red(`Error: ${error instanceof Error ? error.message : String(error)}`)); process.exit(1); } } ================================================ FILE: src/commands/sync-forever.ts ================================================ import { execa } from "execa"; import chalk from "chalk"; import { join } from "path"; import { promises as fs } from "fs"; import { sync } from "./sync"; export async function syncForever(options?: { autoPush?: boolean }): Promise { const ralphScript = join(process.cwd(), ".repomirror", "ralph.sh"); const syncScript = join(process.cwd(), ".repomirror", "sync.sh"); // Check if scripts exist let ralphExists = false; let syncExists = false; try { await fs.access(ralphScript); ralphExists = true; } catch { // ralph.sh doesn't exist } try { await fs.access(syncScript); syncExists = true; } catch { // sync.sh doesn't exist } // For strict backward compatibility: without --auto-push, always require ralph.sh if (!options?.autoPush && !ralphExists) { console.error( chalk.red( "Error: .repomirror/ralph.sh not found. 
Run 'npx repomirror init' first.", ), ); process.exit(1); } // If auto-push is requested, we need to use the new sync() approach // If ralph.sh exists and no auto-push, use legacy approach if (options?.autoPush || !ralphExists) { if (!syncExists) { console.error( chalk.red( "Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.", ), ); process.exit(1); } console.log(chalk.cyan("Running continuous sync...")); if (options?.autoPush) { console.log(chalk.cyan("Auto-push is enabled")); } console.log(chalk.yellow("Press Ctrl+C to stop")); let isRunning = true; // Handle graceful shutdown const signalHandler = () => { console.log(chalk.yellow("\nStopping continuous sync...")); isRunning = false; }; process.on('SIGINT', signalHandler); process.on('SIGTERM', signalHandler); try { while (isRunning) { try { await sync(options?.autoPush ? { autoPush: options.autoPush } : undefined); if (isRunning) { console.log(chalk.gray("===SLEEP===")); console.log(chalk.gray("looping")); // Sleep for 10 seconds, but check for stop condition every second for (let i = 0; i < 10 && isRunning; i++) { await new Promise(resolve => setTimeout(resolve, 1000)); } } } catch (error) { console.error( chalk.red( `Sync iteration failed: ${error instanceof Error ? error.message : String(error)}`, ), ); console.log(chalk.gray("Continuing with next iteration...")); // Sleep for 10 seconds before retrying for (let i = 0; i < 10 && isRunning; i++) { await new Promise(resolve => setTimeout(resolve, 1000)); } } } } finally { // Clean up signal handlers process.off('SIGINT', signalHandler); process.off('SIGTERM', signalHandler); console.log(chalk.yellow("Stopped by user")); } } else { // Use legacy ralph.sh approach if (!ralphExists) { console.error( chalk.red( "Error: .repomirror/ralph.sh not found. 
Run 'npx repomirror init' first.", ), ); process.exit(1); } console.log(chalk.cyan("Running ralph.sh (continuous sync)...")); console.log(chalk.yellow("Press Ctrl+C to stop")); const subprocess = execa("bash", [ralphScript], { stdio: "inherit", cwd: process.cwd(), }); // Handle graceful shutdown for subprocess const signalHandler = () => { console.log(chalk.yellow("\nStopping continuous sync...")); subprocess.kill("SIGINT"); }; process.on('SIGINT', signalHandler); process.on('SIGTERM', signalHandler); try { await subprocess; } catch (error) { // Clean up signal handlers process.off('SIGINT', signalHandler); process.off('SIGTERM', signalHandler); if (error instanceof Error && (error as any).signal === "SIGINT") { console.log(chalk.yellow("Stopped by user")); } else { console.error( chalk.red( `Sync forever failed: ${error instanceof Error ? error.message : String(error)}`, ), ); process.exit(1); } } } } ================================================ FILE: src/commands/sync-one.ts ================================================ import { sync } from "./sync"; // sync-one is just an alias for sync export async function syncOne(options?: { autoPush?: boolean }): Promise { if (options) { await sync(options); } else { await sync(); } } ================================================ FILE: src/commands/sync.ts ================================================ import { execa } from "execa"; import chalk from "chalk"; import { join } from "path"; import { promises as fs } from "fs"; import yaml from "yaml"; import { push } from "./push"; interface RemoteConfig { url: string; branch: string; auto_push?: boolean; } interface RepoMirrorConfig { sourceRepo: string; targetRepo: string; transformationInstructions: string; remotes?: { [remoteName: string]: RemoteConfig; }; push?: { default_remote?: string; default_branch?: string; commit_prefix?: string; }; pull?: { auto_sync?: boolean; source_remote?: string; source_branch?: string; }; } async function loadConfig(): Promise { 
// (continuation of loadConfig, whose signature is in the previous chunk)
try {
    const configPath = join(process.cwd(), "repomirror.yaml");
    const configContent = await fs.readFile(configPath, "utf-8");
    return yaml.parse(configContent) as RepoMirrorConfig;
  } catch {
    // A missing/unparsable config is reported as null, not as an exception.
    return null;
  }
}

// Pushes after a successful sync. With the CLI flag (cliAutoPush=true) every
// configured remote is pushed; otherwise only remotes whose config sets
// auto_push: true are pushed. Push failures are reported but deliberately do
// not fail the sync workflow.
async function performAutoPush(config: RepoMirrorConfig, cliAutoPush: boolean): Promise {
  if (!cliAutoPush && !config.remotes) {
    return;
  }

  // Find remotes with auto_push enabled
  const autoPushRemotes: string[] = [];
  if (config.remotes) {
    for (const [remoteName, remoteConfig] of Object.entries(config.remotes)) {
      if (cliAutoPush || remoteConfig.auto_push) {
        autoPushRemotes.push(remoteName);
      }
    }
  }

  if (autoPushRemotes.length === 0) {
    return;
  }

  console.log(chalk.cyan("\n🚀 Auto-push enabled - pushing to configured remotes..."));

  try {
    if (cliAutoPush) {
      // Push to all remotes when --auto-push is used
      await push({ all: true });
    } else {
      // Push only to remotes with auto_push enabled
      for (const remoteName of autoPushRemotes) {
        await push({ remote: remoteName });
      }
    }
    console.log(chalk.green("✅ Auto-push completed successfully"));
  } catch (error) {
    // Log the error but don't break the sync workflow
    console.log(chalk.yellow("⚠️ Auto-push failed, but sync completed successfully:"));
    console.log(chalk.red(` ${error instanceof Error ? error.message : String(error)}`));
    console.log(chalk.gray(" You can manually push using: npx repomirror push"));
  }
}

// Runs one sync iteration by executing .repomirror/sync.sh, forwarding
// SIGINT/SIGTERM to the child process, then (on success) triggering
// auto-push when requested via options or enabled in the config.
export async function sync(options?: { autoPush?: boolean }): Promise {
  const syncScript = join(process.cwd(), ".repomirror", "sync.sh");

  try {
    // Check if sync.sh exists
    await fs.access(syncScript);
  } catch {
    console.error(
      chalk.red(
        "Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.",
      ),
    );
    process.exit(1);
  }

  console.log(chalk.cyan("Running sync.sh..."));

  const subprocess = execa("bash", [syncScript], {
    stdio: "inherit",
    cwd: process.cwd(),
  });

  // Handle graceful shutdown for subprocess
  const signalHandler = () => {
    console.log(chalk.yellow("\nStopping sync..."));
    subprocess.kill("SIGINT");
  };
  process.on('SIGINT', signalHandler);
  process.on('SIGTERM', signalHandler);

  try {
    await subprocess;
    console.log(chalk.green("Sync completed successfully"));

    // Check for auto-push after successful sync
    const config = await loadConfig();
    if (config && (options?.autoPush || config.remotes)) {
      await performAutoPush(config, options?.autoPush || false);
    }
  } catch (error) {
    // Clean up signal handlers
    // NOTE(review): the `finally` clause (next chunk) also calls
    // process.off; the double removal is harmless — off() on an
    // already-removed handler is a no-op.
    process.off('SIGINT', signalHandler);
    process.off('SIGTERM', signalHandler);

    if (error instanceof Error && (error as any).signal === "SIGINT") {
      console.log(chalk.yellow("\nSync stopped by user"));
      process.exit(0);
    }
    console.error(
      chalk.red(
        `Sync failed: ${error instanceof Error ?
error.message : String(error)}`,
      ),
    );
    process.exit(1);
  } finally {
    // Clean up signal handlers
    process.off('SIGINT', signalHandler);
    process.off('SIGTERM', signalHandler);
  }
}

================================================ FILE: src/commands/visualize.ts ================================================

import { createInterface } from "node:readline";

// ANSI escape codes used for terminal coloring throughout this module.
const colors = {
  reset: "\x1b[0m",
  bright: "\x1b[1m",
  dim: "\x1b[2m",
  red: "\x1b[31m",
  green: "\x1b[32m",
  yellow: "\x1b[33m",
  blue: "\x1b[34m",
  magenta: "\x1b[35m",
  cyan: "\x1b[36m",
};

// Maps a stream-event type to its display color; unknown types render
// uncolored (colors.reset).
function getTypeColor(type: string): string {
  switch (type) {
    case "system":
      return colors.magenta;
    case "user":
      return colors.blue;
    case "assistant":
      return colors.green;
    case "tool_use":
      return colors.cyan;
    case "tool_result":
      return colors.yellow;
    case "message":
      return colors.dim;
    case "text":
      return colors.reset;
    default:
      return colors.reset;
  }
}

// A single entry from a TodoWrite tool call.
interface Todo {
  status: string;
  content: string;
  priority?: string;
}

// Renders a TodoWrite payload as a colored checklist with per-item status
// icons/colors and a trailing progress summary line.
function formatTodoList(todos: Todo[]): string {
  let output = `📋 ${colors.bright}${colors.cyan}Todo List Update${colors.reset}\n`;

  const statusColors = {
    completed: colors.dim + colors.green,
    in_progress: colors.bright + colors.yellow,
    pending: colors.reset,
  };
  const statusIcons = {
    completed: "✅",
    in_progress: "🔄",
    pending: "⏸️",
  };
  const priorityColors = {
    high: colors.red,
    medium: colors.yellow,
    low: colors.dim,
  };

  todos.forEach((todo, index) => {
    // Unknown status/priority values fall back to neutral rendering.
    const statusColor =
      statusColors[todo.status as keyof typeof statusColors] || colors.reset;
    const statusIcon =
      statusIcons[todo.status as keyof typeof statusIcons] || "❓";
    const priorityColor =
      priorityColors[todo.priority as keyof typeof priorityColors] || colors.reset;
    const checkbox = todo.status === "completed" ?
"☑️" : "☐";
    output += ` ${checkbox} ${statusIcon} ${statusColor}${todo.content}${colors.reset}`;
    output += ` ${priorityColor}[${todo.priority}]${colors.reset}`;
    if (todo.status === "in_progress") {
      output += ` ${colors.bright}${colors.yellow}← ACTIVE${colors.reset}`;
    }
    output += "\n";
  });

  // Add summary stats
  const completed = todos.filter((t) => t.status === "completed").length;
  const inProgress = todos.filter((t) => t.status === "in_progress").length;
  const pending = todos.filter((t) => t.status === "pending").length;

  output += `\n ${colors.dim}📊 Progress: ${colors.green}${completed} completed${colors.reset}`;
  output += `${colors.dim}, ${colors.yellow}${inProgress} active${colors.reset}`;
  output += `${colors.dim}, ${colors.reset}${pending} pending${colors.reset}`;
  // NOTE(review): an empty todos array makes this percentage NaN —
  // presumably TodoWrite always sends at least one item; confirm upstream.
  output += `${colors.dim} (${Math.round((completed / todos.length) * 100)}% done)${colors.reset}`;

  return output;
}

// Produces the one-line "concise" rendering of a stream-JSON event; tool
// calls are displayed with their most informative argument. (This function
// continues past the end of the visible chunk.)
function formatConcise(json: any): string {
  const type = json.type || "unknown";
  const typeColor = getTypeColor(type);

  let output = `⏺ ${typeColor}${type.charAt(0).toUpperCase() + type.slice(1)}${colors.reset}`;

  // Special handling for TodoWrite calls
  if (
    type === "assistant" &&
    json.message?.content?.[0]?.name === "TodoWrite"
  ) {
    const toolInput = json.message.content[0].input;
    if (toolInput?.todos && Array.isArray(toolInput.todos)) {
      return formatTodoList(toolInput.todos);
    }
  }

  // Add context based on type
  if (type === "assistant" && json.message?.content?.[0]?.name) {
    const toolName = json.message.content[0].name;
    const toolInput = json.message.content[0].input;

    // Format tool name with key arguments
    let toolDisplay = `${colors.cyan}${toolName}${colors.reset}`;

    if (toolInput) {
      const keyArgs = [];
      // Extract the most important argument for each tool type
      if (toolInput.file_path) keyArgs.push(toolInput.file_path);
      else if (toolInput.path) keyArgs.push(toolInput.path);
      else if (toolInput.pattern) keyArgs.push(`"${toolInput.pattern}"`);
      else if (toolInput.command)
keyArgs.push(toolInput.command); else if (toolInput.cmd) keyArgs.push(toolInput.cmd); else if (toolInput.query) keyArgs.push(`"${toolInput.query}"`); else if (toolInput.description) keyArgs.push(toolInput.description); else if (toolInput.prompt) keyArgs.push(`"${toolInput.prompt.substring(0, 30)}..."`); else if (toolInput.url) keyArgs.push(toolInput.url); if (keyArgs.length > 0) { toolDisplay += `(${colors.green}${keyArgs[0]}${colors.reset})`; } } output = `⏺ ${toolDisplay}`; // Show additional arguments on next lines for complex tools if (toolInput) { const additionalArgs = []; if (toolName === "Bash" && toolInput.cwd) { additionalArgs.push(`cwd: ${toolInput.cwd}`); } if (toolInput.limit) additionalArgs.push(`limit: ${toolInput.limit}`); if (toolInput.offset) additionalArgs.push(`offset: ${toolInput.offset}`); if (toolInput.include) additionalArgs.push(`include: ${toolInput.include}`); if (toolInput.old_string && toolInput.new_string) { additionalArgs.push( `replace: "${toolInput.old_string.substring(0, 20)}..." → "${toolInput.new_string.substring(0, 20)}..."`, ); } if (toolInput.timeout) additionalArgs.push(`timeout: ${toolInput.timeout}ms`); if (additionalArgs.length > 0) { output += `\n ⎿ ${colors.dim}${additionalArgs.join(", ")}${colors.reset}`; } } } else if (type === "tool_result" && json.name) { output += `(${colors.cyan}${json.name}${colors.reset})`; } else if (type === "user" && json.message?.content?.[0]) { const content = json.message.content[0]; if (content.type === "tool_result") { // Override the type display for tool results output = `⏺ ${colors.yellow}Tool Result${colors.reset}`; // Show result summary and first 2 lines if (content.content) { const resultText = typeof content.content === "string" ? 
content.content : JSON.stringify(content.content); const lines = resultText.split("\n"); const chars = resultText.length; output += `\n ⎿ ${colors.dim}${lines.length} lines, ${chars} chars${colors.reset}`; if (content.is_error) { output += ` ${colors.red}ERROR${colors.reset}`; } // Show first 2 lines of content if (lines.length > 0 && lines[0].trim()) { output += `\n ⎿ ${colors.reset}${lines[0]}${colors.reset}`; } if (lines.length > 1 && lines[1].trim()) { output += `\n ${colors.dim}${lines[1]}${colors.reset}`; } } } else if (content.text) { const text = content.text.substring(0, 50); output += `: ${colors.dim}${text}${text.length === 50 ? "..." : ""}${colors.reset}`; } } else if (type === "system" && json.subtype) { output += `(${colors.dim}${json.subtype}${colors.reset})`; } // Show assistant message content if it exists if (type === "assistant" && json.message?.content) { const textContent = json.message.content.find( (c: any) => c.type === "text", ); if (textContent?.text) { const lines = textContent.text.split("\n").slice(0, 3); // Show first 3 lines output += `\n ⎿ ${colors.reset}${lines[0]}${colors.reset}`; if (lines.length > 1) { output += `\n ${colors.dim}${lines[1]}${colors.reset}`; } if (lines.length > 2) { output += `\n ${colors.dim}${lines[2]}${colors.reset}`; } if (textContent.text.split("\n").length > 3) { output += `\n ${colors.dim}...${colors.reset}`; } } } // Add summary line let summary = ""; if (json.message?.usage) { const usage = json.message.usage; summary = `${usage.input_tokens || 0}/${usage.output_tokens || 0} tokens`; } else if (json.output && typeof json.output === "string") { summary = `${json.output.length} chars output`; } else if (json.message?.content?.length) { summary = `${json.message.content.length} content items`; } else if (json.tools?.length) { summary = `${json.tools.length} tools available`; } if (summary) { output += `\n ⎿ ${colors.dim}${summary}${colors.reset}`; } return output; } function displayToolCallWithResult( 
toolCall: any,
  toolCallJson: any,
  toolResultJson: any,
  callTimestamp: string,
  resultTimestamp: string,
) {
  // Display the tool call header
  process.stdout.write(`${callTimestamp}${formatConcise(toolCallJson)}\n`);
  // Display the result
  const toolResult = toolResultJson.message.content[0];
  const isError = toolResult.is_error;
  const resultIcon = isError ? "❌" : "✅";
  const resultColor = isError ? colors.red : colors.green;
  process.stdout.write(
    ` ${resultTimestamp}${resultIcon} ${resultColor}Tool Result${colors.reset}`,
  );
  if (toolResult.content) {
    const resultText =
      typeof toolResult.content === "string"
        ? toolResult.content
        : JSON.stringify(toolResult.content);
    const lines = resultText.split("\n");
    const chars = resultText.length;
    process.stdout.write(
      ` ${colors.dim}(${lines.length} lines, ${chars} chars)${colors.reset}`,
    );
    if (isError) {
      process.stdout.write(` ${colors.red}ERROR${colors.reset}`);
    }
    // Show first few lines of result
    const linesToShow = Math.min(3, lines.length);
    for (let i = 0; i < linesToShow; i++) {
      if (lines[i].trim()) {
        const lineColor = i === 0 ? colors.reset : colors.dim;
        process.stdout.write(`\n ⎿ ${lineColor}${lines[i]}${colors.reset}`);
      }
    }
    if (lines.length > linesToShow) {
      process.stdout.write(
        `\n ⎿ ${colors.dim}... ${lines.length - linesToShow} more lines${colors.reset}`,
      );
    }
  }
  process.stdout.write("\n\n");
}

/**
 * Reads Claude stream-json lines from stdin and pretty-prints each one.
 * Tool calls and their results arrive as separate messages, so both are
 * buffered in Maps keyed by tool-use id and rendered together as soon as
 * the matching half arrives. With --debug (flag or option) each line is
 * prefixed with an ISO timestamp. SIGINT/SIGTERM close the reader and
 * exit 0.
 *
 * Fix: the return annotation was a bare `Promise`, which TypeScript
 * rejects ("Generic type 'Promise<T>' requires 1 type argument(s)");
 * restored to `Promise<void>` — presumably `<void>` was lost during
 * extraction of this dump.
 */
export async function visualize(
  options: { debug?: boolean } = {},
): Promise<void> {
  const rl = createInterface({
    input: process.stdin,
    crlfDelay: Infinity,
  });
  const debugMode = options.debug || process.argv.includes("--debug");
  const toolCalls = new Map(); // Store tool calls by their ID
  const pendingResults = new Map(); // Store results waiting for their tool calls
  let lastLine: any = null; // Track the last line to detect final message
  let isLastAssistantMessage = false;
  // Handle graceful shutdown.
  // NOTE(review): process.exit(0) here preempts the rl "close" handler's
  // final-message printout — confirm that is intended on Ctrl-C.
  const signalHandler = () => {
    console.log("\n" + colors.yellow + "Stopping visualizer..." + colors.reset);
    rl.close();
    process.exit(0);
  };
  process.on('SIGINT', signalHandler);
  process.on('SIGTERM', signalHandler);
  rl.on("line", (line) => {
    if (line.trim()) {
      const timestamp = debugMode
        ? `${colors.dim}[${new Date().toISOString()}]${colors.reset} `
        : "";
      try {
        const json = JSON.parse(line);
        // Check if this is a tool call
        if (json.type === "assistant" && json.message?.content?.[0]?.id) {
          const toolCall = json.message.content[0];
          const toolId = toolCall.id;
          // Store the tool call
          toolCalls.set(toolId, {
            toolCall: json,
            timestamp: timestamp,
          });
          // Check if we have a pending result for this tool call
          if (pendingResults.has(toolId)) {
            const result = pendingResults.get(toolId);
            displayToolCallWithResult(
              toolCall,
              json,
              result.toolResult,
              result.timestamp,
              timestamp,
            );
            pendingResults.delete(toolId);
          } else {
            // Display the tool call and mark it as pending
            process.stdout.write(`${timestamp + formatConcise(json)}\n`);
            process.stdout.write(
              `${colors.dim} ⎿ Waiting for result...${colors.reset}\n\n`,
            );
          }
        }
        // Check if this is a tool result
        else if (
          json.type === "user" &&
          json.message?.content?.[0]?.type === "tool_result"
        ) {
          const toolResult = json.message.content[0];
          const toolId = toolResult.tool_use_id;
          if (toolCalls.has(toolId)) {
            // We have the matching tool call, display them together
            const stored = toolCalls.get(toolId);
            displayToolCallWithResult(
              stored.toolCall.message.content[0],
              stored.toolCall,
              json,
              stored.timestamp,
              timestamp,
            );
            toolCalls.delete(toolId);
          } else {
            // Store the result and wait for the tool call
            pendingResults.set(toolId, {
              toolResult: json,
              timestamp: timestamp,
            });
          }
        }
        // Check if this is the result message and display full content
        else if (json.type === "result" && json.result) {
          process.stdout.write(`${timestamp + formatConcise(json)}\n\n`);
          process.stdout.write(
            `${colors.bright}${colors.green}=== Final Result ===${colors.reset}\n\n`,
          );
          process.stdout.write(`${json.result}\n`);
        }
        // For all other message types, display
// normally
        else {
          process.stdout.write(`${timestamp + formatConcise(json)}\n\n`);
        }
        // Track if this might be the last assistant message
        lastLine = json;
        isLastAssistantMessage =
          json.type === "assistant" && !json.message?.content?.[0]?.id;
      } catch (_error) {
        // Non-JSON input lines are reported (truncated) rather than crashing.
        process.stdout.write(
          `${timestamp}${colors.red}⏺ Parse Error${colors.reset}\n`,
        );
        process.stdout.write(
          ` ⎿ ${colors.dim}${line.substring(0, 50)}...${colors.reset}\n\n`,
        );
      }
    }
  });
  rl.on("close", () => {
    // If the last message was an assistant message (not a tool call), display the full content
    if (isLastAssistantMessage && lastLine?.message?.content?.[0]?.text) {
      process.stdout.write(
        `\n${colors.bright}${colors.green}=== Final Assistant Message ===${colors.reset}\n\n`,
      );
      process.stdout.write(`${lastLine.message.content[0].text}\n`);
    }
  });
}
================================================ FILE: src/templates/gitignore.template ================================================
claude_output.jsonl
================================================ FILE: src/templates/ralph.sh.template ================================================
#!/bin/bash
while :; do
./.repomirror/sync.sh
echo -e "===SLEEP===\n===SLEEP===\n";
echo 'looping';
sleep 10;
done
================================================ FILE: src/templates/sync.sh.template ================================================
#!/bin/bash
cat .repomirror/prompt.md | \
claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir ${targetRepo} | \
tee -a .repomirror/claude_output.jsonl | \
npx repomirror visualize --debug;
================================================ FILE: test-resolve.js ================================================
// Scratch script that sanity-checks how init resolves a relative sourceRepo
// path against the current working directory.
const path = require('path');
// Mock process.cwd()
// NOTE(review): monkey-patching process.cwd globally is fine in this
// throwaway script but should not be copied into real tests.
const mockCwd = '/tmp/test-dir';
process.cwd = () => mockCwd;
const sourceRepo = './source';
// Our current logic
const baseDir1 = sourceRepo && sourceRepo !== './' ?
path.resolve(process.cwd(), sourceRepo) : process.cwd();
console.log('sourceRepo:', sourceRepo);
console.log('process.cwd():', process.cwd());
console.log('baseDir:', baseDir1);
console.log('Expected files at:', path.join(baseDir1, '.repomirror'));
================================================ FILE: tests/README.md ================================================
# Test Suite for repomirror

This directory contains the test suite for the repomirror project using Vitest.

## Structure

```
tests/
├── README.md          # This file
├── setup.ts           # Global test setup
├── basic.test.ts      # Basic functionality tests
├── commands/          # Command-specific tests
│   └── simple.test.ts # Example command tests
└── helpers/           # Test utilities and helpers
    ├── index.ts       # Helper exports
    ├── test-utils.ts  # Test utility functions
    └── fixtures.ts    # Mock data and fixtures
```

## Configuration

The test configuration is defined in `vitest.config.ts` in the project root:

- **Environment**: Node.js
- **Coverage**: V8 provider with HTML and JSON reports
- **TypeScript**: Full TypeScript support with path aliases
- **Coverage Thresholds**: 80% for branches, functions, lines, and statements

## Running Tests

```bash
# Run all tests
npm test

# Run tests with coverage
npm test -- --coverage

# Run specific test file
npx vitest run tests/basic.test.ts

# Run tests in watch mode
npx vitest
```

## Test Utilities

The `helpers/` directory provides utilities for testing:

### `test-utils.ts`

- `createTempDir()` - Create temporary directories for testing
- `cleanupTempDir()` - Clean up temporary directories
- `createMockGitRepo()` - Create mock git repositories
- `mockConsole()` - Mock console methods
- `mockProcess()` - Mock process methods
- `mockInquirer()` - Mock inquirer prompts
- `mockOra()` - Mock ora spinners
- `mockExeca()` - Mock command execution

### `fixtures.ts`

- Pre-defined mock data for consistent testing
- Sample repository configurations
- Mock command responses
- File structure templates

## Writing Tests
Example test structure: ```typescript import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; import { createTempDir, cleanupTempDir, mockConsole } from "../helpers"; describe("your feature", () => { let tempDir: string; beforeEach(async () => { tempDir = await createTempDir(); // Setup test environment }); afterEach(async () => { await cleanupTempDir(tempDir); vi.restoreAllMocks(); }); it("should do something", async () => { // Your test code here expect(true).toBe(true); }); }); ``` ## Coverage Coverage reports are generated in the `coverage/` directory: - HTML report: `coverage/index.html` - JSON report: `coverage/coverage.json` The project maintains coverage thresholds of 80% across all metrics. ## TypeScript Support Tests have full TypeScript support with: - Path aliases: `@/` for `src/`, `@tests/` for `tests/` - Type checking during test runs - Import resolution for both source and test files ## Integration with CI/CD The test suite is designed to work with continuous integration: - Exit codes properly indicate success/failure - Coverage reports can be uploaded to coverage services - Tests run in isolated environments ================================================ FILE: tests/basic.test.ts ================================================ import { describe, it, expect } from "vitest"; describe("basic test", () => { it("should pass a simple test", () => { expect(1 + 1).toBe(2); }); it("should work with async functions", async () => { const result = await Promise.resolve("hello"); expect(result).toBe("hello"); }); it("should work with TypeScript types", () => { const obj: { name: string; age: number } = { name: "test", age: 42 }; expect(obj.name).toBe("test"); expect(obj.age).toBe(42); }); }); ================================================ FILE: tests/commands/dispatch-sync.test.ts ================================================ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { promises as fs } from "fs"; 
// ---------------------------------------------------------------------------
// tests/commands/dispatch-sync.test.ts (module setup + test suites)
// Unit tests for the `dispatch-sync` command. All side-effecting dependencies
// (fs.promises.access, execa, inquirer, chalk, ora) are mocked, so no real
// subprocess, prompt, or filesystem access happens during the tests.
// NOTE(review): this span was re-flowed from a collapsed extraction dump;
// all code tokens are unchanged — only line structure and comments differ.
// ---------------------------------------------------------------------------
import { execa } from "execa";
import inquirer from "inquirer";
import chalk from "chalk";
import { dispatchSync } from "../../src/commands/dispatch-sync";

// Mock dependencies
vi.mock("fs", () => ({
  promises: {
    access: vi.fn(),
  },
}));
vi.mock("execa");
vi.mock("inquirer");
vi.mock("chalk", () => ({
  default: {
    red: vi.fn((text) => text),
    green: vi.fn((text) => text),
    yellow: vi.fn((text) => text),
    cyan: vi.fn((text) => text),
    gray: vi.fn((text) => text),
  },
}));
vi.mock("ora", () => ({
  default: vi.fn(() => ({
    start: vi.fn().mockReturnThis(),
    succeed: vi.fn().mockReturnThis(),
    fail: vi.fn().mockReturnThis(),
    warn: vi.fn().mockReturnThis(),
  })),
}));

// Mock console methods
const mockConsoleLog = vi.spyOn(console, "log").mockImplementation(() => {});
const mockConsoleError = vi.spyOn(console, "error").mockImplementation(() => {});
// process.exit is replaced with a throw so each test can assert on the exact
// exit code via rejects.toThrow(...) instead of the process actually exiting.
const mockProcessExit = vi.spyOn(process, "exit").mockImplementation((code?: number) => {
  throw new Error(`process.exit unexpectedly called with "${code}"`);
  return undefined as never;
});

describe("dispatch-sync command", () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe("flag validation", () => {
    it("should exit with error when --quiet is used without --yes", async () => {
      await expect(() => dispatchSync({ quiet: true, yes: false })).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });

    it("should allow --yes without --quiet", async () => {
      // Ordered execa mocks mirror the command's call sequence:
      // gh --version, then git remote lookup, then the dispatch itself.
      // Mock workflow exists
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // Mock gh CLI installed
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as any);
      // Mock git remote origin URL
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "https://github.com/testowner/testrepo.git", stderr: "" } as any);
      // Mock workflow dispatch
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "workflow dispatched", stderr: "" } as any);
      await expect(dispatchSync({ yes: true, quiet: false })).resolves.toBeUndefined();
    });

    it("should allow --yes and --quiet together", async () => {
      // Mock workflow exists
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // Mock gh CLI installed
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as any);
      // Mock git remote origin URL
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "https://github.com/testowner/testrepo.git", stderr: "" } as any);
      // Mock workflow dispatch
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "workflow dispatched", stderr: "" } as any);
      await expect(dispatchSync({ yes: true, quiet: true })).resolves.toBeUndefined();
    });
  });

  describe("prerequisite checks", () => {
    it("should exit when workflow file doesn't exist", async () => {
      vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT"));
      await expect(() => dispatchSync()).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });

    it("should exit when gh CLI is not installed", async () => {
      // Mock workflow exists
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // Mock gh CLI not installed
      vi.mocked(execa).mockRejectedValueOnce(new Error("command not found"));
      await expect(() => dispatchSync()).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });

    it("should exit when git repository info cannot be determined", async () => {
      // Mock workflow exists
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // Mock gh CLI installed
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as any);
      // Mock git remote origin URL failure
      vi.mocked(execa).mockRejectedValueOnce(new Error("no remote origin"));
      await expect(() => dispatchSync()).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });
  });

  describe("user confirmation", () => {
    beforeEach(() => {
      // Mock all prerequisite checks pass
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as any) // gh --version
        .mockResolvedValueOnce({ stdout: "https://github.com/testowner/testrepo.git", stderr: "" } as any); // git config --get remote.origin.url
    });

    it("should prompt for confirmation when --yes is not provided", async () => {
      // Mock user confirms
      vi.mocked(inquirer.prompt).mockResolvedValue({ shouldProceed: true });
      // Mock workflow dispatch
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "workflow dispatched", stderr: "" } as any);
      await dispatchSync();
      expect(inquirer.prompt).toHaveBeenCalledWith([
        {
          type: "confirm",
          name: "shouldProceed",
          message: "Do you want to dispatch the workflow?",
          default: true,
        },
      ]);
    });

    it("should exit when user declines confirmation", async () => {
      // Mock user declines
      vi.mocked(inquirer.prompt).mockResolvedValue({ shouldProceed: false });
      // Declining is a clean exit (code 0), not an error.
      await expect(() => dispatchSync()).rejects.toThrow("process.exit unexpectedly called with \"0\"");
    });

    it("should skip confirmation when --yes is provided", async () => {
      // Mock workflow dispatch
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "workflow dispatched", stderr: "" } as any);
      await dispatchSync({ yes: true });
      expect(inquirer.prompt).not.toHaveBeenCalled();
    });
  });

  describe("workflow dispatch", () => {
    beforeEach(() => {
      // Mock all prerequisite checks pass
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as any) // gh --version
        .mockResolvedValueOnce({ stdout: "https://github.com/testowner/testrepo.git", stderr: "" } as any); // git config --get remote.origin.url
    });

    it("should successfully dispatch workflow", async () => {
      // Mock workflow dispatch
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "workflow dispatched", stderr: "" } as any);
      await dispatchSync({ yes: true });
      expect(vi.mocked(execa)).toHaveBeenCalledWith("gh", [
        "workflow",
        "run",
        "repomirror.yml",
        "--repo",
        "testowner/testrepo",
      ]);
    });

    it("should handle workflow dispatch failure", async () => {
      // Mock workflow dispatch failure
      vi.mocked(execa).mockRejectedValueOnce(new Error("workflow not found"));
      await expect(() => dispatchSync({ yes: true })).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });

    it("should handle authentication errors", async () => {
      // Mock authentication error
      vi.mocked(execa).mockRejectedValueOnce(new Error("authentication failed"));
      await expect(() => dispatchSync({ yes: true })).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });

    it("should handle workflow not found errors", async () => {
      // Mock workflow not found error
      vi.mocked(execa).mockRejectedValueOnce(new Error("not found"));
      await expect(() => dispatchSync({ yes: true })).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });
  });

  describe("repository URL parsing", () => {
    beforeEach(() => {
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as any);
    });

    it("should parse HTTPS GitHub URL correctly", async () => {
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "https://github.com/owner/repo.git", stderr: "" } as any);
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "dispatched", stderr: "" } as any);
      await dispatchSync({ yes: true });
      expect(vi.mocked(execa)).toHaveBeenCalledWith("gh", [
        "workflow",
        "run",
        "repomirror.yml",
        "--repo",
        "owner/repo",
      ]);
    });

    it("should parse SSH GitHub URL correctly", async () => {
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "git@github.com:owner/repo.git", stderr: "" } as any);
      vi.mocked(execa).mockResolvedValueOnce({ stdout: "dispatched", stderr: "" } as any);
      await dispatchSync({ yes: true });
      expect(vi.mocked(execa)).toHaveBeenCalledWith("gh", [
        "workflow",
        "run",
        "repomirror.yml",
        "--repo",
        "owner/repo",
      ]);
    });
  });

  describe("output modes", () => {
    beforeEach(() => {
      // Mock all prerequisite checks pass
      vi.mocked(fs.access).mockResolvedValue(undefined);
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: "gh version 2.0.0", stderr: "" } as
any) .mockResolvedValueOnce({ stdout: "https://github.com/testowner/testrepo.git", stderr: "" } as any) .mockResolvedValueOnce({ stdout: "workflow dispatched", stderr: "" } as any); }); it("should complete successfully in normal mode", async () => { await expect(dispatchSync({ yes: true })).resolves.toBeUndefined(); }); it("should complete successfully in quiet mode", async () => { await expect(dispatchSync({ yes: true, quiet: true })).resolves.toBeUndefined(); }); }); }); ================================================ FILE: tests/commands/github-actions.test.ts ================================================ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { promises as fs } from "fs"; import { join } from "path"; import { createTempDir, cleanupTempDir, mockConsole, mockProcess, } from "../helpers/test-utils"; // Mock external dependencies const mockInquirerPrompt = vi.fn(); const mockOra = vi.fn(); vi.mock("inquirer", () => ({ default: { prompt: mockInquirerPrompt, }, })); vi.mock("ora", () => ({ default: mockOra, })); // Import after mocking const { githubActions } = await import("../../src/commands/github-actions"); describe("github-actions command", () => { let tempDir: string; let consoleMock: ReturnType; let processMock: ReturnType; let spinnerMock: any; beforeEach(async () => { tempDir = await createTempDir("repomirror-github-actions-"); consoleMock = mockConsole(); processMock = mockProcess(true); processMock.cwd.mockReturnValue(tempDir); spinnerMock = { start: vi.fn().mockReturnThis(), succeed: vi.fn().mockReturnThis(), fail: vi.fn().mockReturnThis(), }; mockOra.mockReturnValue(spinnerMock); vi.clearAllMocks(); }); afterEach(async () => { await cleanupTempDir(tempDir); vi.restoreAllMocks(); }); describe("successful workflow generation", () => { beforeEach(async () => { // Create a mock repomirror.yaml const config = { sourceRepo: "./", targetRepo: "../myrepo-transformed", transformationInstructions: "transform to 
typescript", };
      // NOTE(review): the fragment above completes the config literal whose
      // string ("transform to typescript") was split by the extraction dump.
      // Tokens in this span are unchanged — only line structure and comments
      // were added.
      const yaml = await import("yaml");
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify(config),
      );
    });

    it("should create GitHub Actions workflow with defaults", async () => {
      mockInquirerPrompt.mockResolvedValue({
        workflowName: "repomirror-sync.yml",
        schedule: "0 */6 * * *",
        autoPush: true,
        targetRepo: "user/myrepo-transformed",
      });
      await githubActions();
      // Check workflow directory was created
      const workflowDir = join(tempDir, ".github", "workflows");
      const dirStats = await fs.stat(workflowDir);
      expect(dirStats.isDirectory()).toBe(true);
      // Check workflow file was created
      const workflowPath = join(workflowDir, "repomirror-sync.yml");
      const workflowContent = await fs.readFile(workflowPath, "utf-8");
      // Verify content includes expected elements
      expect(workflowContent).toContain("name: RepoMirror Sync");
      expect(workflowContent).toContain("cron: '0 */6 * * *'");
      expect(workflowContent).toContain("repository: user/myrepo-transformed");
      expect(workflowContent).toContain("if: true");
      // Verify success messages
      expect(spinnerMock.succeed).toHaveBeenCalledWith(
        "GitHub Actions workflow created",
      );
      expect(consoleMock.log).toHaveBeenCalledWith(
        expect.stringContaining("✅ Workflow created"),
      );
    });

    it("should handle CLI options correctly", async () => {
      // Only targetRepo comes from the prompt here; the rest arrive as CLI
      // options passed straight to githubActions().
      mockInquirerPrompt.mockResolvedValue({
        targetRepo: "org/repo",
      });
      await githubActions({
        workflowName: "custom.yml",
        schedule: "0 0 * * *",
        autoPush: false,
      });
      const workflowPath = join(tempDir, ".github", "workflows", "custom.yml");
      const workflowContent = await fs.readFile(workflowPath, "utf-8");
      expect(workflowContent).toContain("cron: '0 0 * * *'");
      expect(workflowContent).toContain("if: false");
      expect(mockInquirerPrompt).toHaveBeenCalledWith(
        expect.arrayContaining([
          expect.objectContaining({
            name: "targetRepo",
          }),
        ]),
      );
    });

    it("should validate workflow file extension", async () => {
      // The prompt mock introspects the question list so the validator can be
      // exercised directly with good and bad file names.
      mockInquirerPrompt.mockImplementation((questions) => {
        const workflowQuestion = questions.find(
          (q: any) => q.name === "workflowName",
        );
        if (workflowQuestion && workflowQuestion.validate) {
          expect(workflowQuestion.validate("test.yml")).toBe(true);
          expect(workflowQuestion.validate("test.yaml")).toBe(true);
          expect(workflowQuestion.validate("test.txt")).toBe(
            "Workflow file must end with .yml or .yaml",
          );
        }
        return Promise.resolve({
          workflowName: "test.yml",
          schedule: "0 */6 * * *",
          autoPush: true,
          targetRepo: "user/repo",
        });
      });
      await githubActions();
    });
  });

  describe("error handling", () => {
    it("should exit when repomirror.yaml not found", async () => {
      await expect(githubActions()).rejects.toThrow(
        "Process exit called with code 1",
      );
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("repomirror.yaml not found"),
      );
      expect(consoleMock.log).toHaveBeenCalledWith(
        expect.stringContaining("npx repomirror init"),
      );
    });

    it("should handle file write errors", async () => {
      // Create config
      const yaml = await import("yaml");
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify({ sourceRepo: "./", targetRepo: "../target" }),
      );
      mockInquirerPrompt.mockResolvedValue({
        workflowName: "test.yml",
        schedule: "0 */6 * * *",
        autoPush: true,
        targetRepo: "user/repo",
      });
      // Mock writeFile to fail
      // Only .yml writes fail; other writes fall through to the real fs so
      // earlier fixture setup keeps working.
      const originalWriteFile = fs.writeFile;
      vi.spyOn(fs, "writeFile").mockImplementation((path, content, options) => {
        if (typeof path === "string" && path.endsWith(".yml")) {
          throw new Error("Permission denied");
        }
        return originalWriteFile(path as any, content, options);
      });
      await expect(githubActions()).rejects.toThrow(
        "Process exit called with code 1",
      );
      expect(spinnerMock.fail).toHaveBeenCalledWith(
        "Failed to create workflow",
      );
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Permission denied"),
      );
    });
  });

  describe("workflow content generation", () => {
    beforeEach(async () => {
      const yaml = await import("yaml");
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify({ sourceRepo: "./", targetRepo: "../target", transformationInstructions: "test", }),
      );
    });

    it("should include all required workflow steps", async () => {
      mockInquirerPrompt.mockResolvedValue({
        workflowName: "test.yml",
        schedule: "0 */6 * * *",
        autoPush: true,
        targetRepo: "user/repo",
      });
      await githubActions();
      const workflowPath = join(tempDir, ".github", "workflows", "test.yml");
      const content = await fs.readFile(workflowPath, "utf-8");
      // Check for essential workflow components
      expect(content).toContain("on:");
      expect(content).toContain("schedule:");
      expect(content).toContain("workflow_dispatch:");
      expect(content).toContain("uses: actions/checkout@v3");
      expect(content).toContain("uses: actions/setup-node@v3");
      expect(content).toContain("npm install -g repomirror");
      expect(content).toContain("npx repomirror sync");
      expect(content).toContain("CLAUDE_API_KEY");
      expect(content).toContain("SKIP_CLAUDE_TEST: true");
    });

    it("should handle custom schedule correctly", async () => {
      mockInquirerPrompt.mockResolvedValue({
        workflowName: "test.yml",
        schedule: "*/15 * * * *",
        autoPush: true,
        targetRepo: "user/repo",
      });
      await githubActions();
      const workflowPath = join(tempDir, ".github", "workflows", "test.yml");
      const content = await fs.readFile(workflowPath, "utf-8");
      expect(content).toContain("cron: '*/15 * * * *'");
    });

    it("should disable auto-push when requested", async () => {
      mockInquirerPrompt.mockResolvedValue({
        workflowName: "test.yml",
        schedule: "0 */6 * * *",
        autoPush: false,
        targetRepo: "user/repo",
      });
      await githubActions();
      const workflowPath = join(tempDir, ".github", "workflows", "test.yml");
      const content = await fs.readFile(workflowPath, "utf-8");
      expect(content).toContain("if: false");
    });
  });

  describe("user prompts and validation", () => {
    beforeEach(async () => {
      const yaml = await import("yaml");
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify({ sourceRepo: "./", targetRepo: "../myrepo-transformed", transformationInstructions: "test", }),
      );
    });

    it("should
validate target repo format", async () => { mockInquirerPrompt.mockImplementation((questions) => { const targetQuestion = questions.find( (q: any) => q.name === "targetRepo", ); if (targetQuestion && targetQuestion.validate) { expect(targetQuestion.validate("")).toBe( "Please provide the GitHub repository in owner/repo format", ); expect(targetQuestion.validate("../myrepo-transformed")).toBe( "Please provide the GitHub repository in owner/repo format", ); expect(targetQuestion.validate("user/repo")).toBe(true); } return Promise.resolve({ workflowName: "test.yml", schedule: "0 */6 * * *", autoPush: true, targetRepo: "user/repo", }); }); await githubActions(); }); it("should provide sensible defaults", async () => { mockInquirerPrompt.mockImplementation((questions) => { const nameQuestion = questions.find( (q: any) => q.name === "workflowName", ); const scheduleQuestion = questions.find( (q: any) => q.name === "schedule", ); const pushQuestion = questions.find( (q: any) => q.name === "autoPush", ); expect(nameQuestion?.default).toBe("repomirror-sync.yml"); expect(scheduleQuestion?.default).toBe("0 */6 * * *"); expect(pushQuestion?.default).toBe(true); return Promise.resolve({ workflowName: "test.yml", schedule: "0 */6 * * *", autoPush: true, targetRepo: "user/repo", }); }); await githubActions(); }); }); }); ================================================ FILE: tests/commands/init.test.ts ================================================ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { promises as fs } from "fs"; import { join } from "path"; import { createTempDir, cleanupTempDir, mockConsole, mockProcess, createMockGitRepo, } from "../helpers/test-utils"; import { mockInquirerResponses, mockTransformationPrompt, } from "../helpers/fixtures"; // Mock external dependencies at module level const mockInquirerPrompt = vi.fn(); const mockOra = vi.fn(); const mockExeca = vi.fn(); const mockClaudeQuery = vi.fn(); vi.mock("inquirer", () => 
({ default: { prompt: mockInquirerPrompt, }, })); vi.mock("ora", () => ({ default: mockOra, })); vi.mock("execa", () => ({ execa: mockExeca, })); vi.mock("@anthropic-ai/claude-code", () => ({ query: mockClaudeQuery, })); // Import the module after mocking const { init } = await import("../../src/commands/init"); describe("init command", () => { let tempSourceDir: string; let tempTargetDir: string; let consoleMock: ReturnType; let processMock: ReturnType; let spinnerMock: any; beforeEach(async () => { // Create temporary directories tempSourceDir = await createTempDir("repomirror-source-"); tempTargetDir = await createTempDir("repomirror-target-"); // Setup mocks consoleMock = mockConsole(); processMock = mockProcess(true); // Throw on process.exit by default // Mock process.cwd to return our temp source directory processMock.cwd.mockReturnValue(tempSourceDir); // Setup spinner mock spinnerMock = { start: vi.fn().mockReturnThis(), succeed: vi.fn().mockReturnThis(), fail: vi.fn().mockReturnThis(), stop: vi.fn().mockReturnThis(), }; mockOra.mockReturnValue(spinnerMock); // Clear all mocks vi.clearAllMocks(); }); afterEach(async () => { // Cleanup temp directories await cleanupTempDir(tempSourceDir); await cleanupTempDir(tempTargetDir); // Restore all mocks vi.restoreAllMocks(); }); describe("successful initialization flow", () => { it("should complete full initialization with all checks passing", async () => { // Setup target directory as git repo with remotes await createMockGitRepo(tempTargetDir, true); // Mock inquirer responses mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); // Mock execa with successful responses mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! 
How can I help you today?", exitCode: 0 }); // claude test // Mock Claude SDK query mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); // Run init await init(); // Verify inquirer was called expect(mockInquirerPrompt).toHaveBeenCalledWith([ expect.objectContaining({ type: "input", name: "sourceRepo", message: "Source Repo you want to transform:", default: "./" }), expect.objectContaining({ type: "input", name: "targetRepo", message: "Where do you want to transform code to:", default: expect.stringMatching(/-transformed$/) }), expect.objectContaining({ type: "input", name: "transformationInstructions", message: "What changes do you want to make:", default: "translate this python repo to typescript" }) ]); // Verify preflight checks were called expect(mockExeca).toHaveBeenCalledWith("git", ["rev-parse", "--git-dir"], { cwd: tempTargetDir }); expect(mockExeca).toHaveBeenCalledWith("git", ["remote", "-v"], { cwd: tempTargetDir }); expect(mockExeca).toHaveBeenCalledWith("claude", ["-p", "say hi"], { timeout: 30000, input: "" }); // Verify Claude query was called expect(mockClaudeQuery).toHaveBeenCalledWith({ prompt: expect.stringContaining("your task is to generate an optimized prompt"), }); // Verify .repomirror directory and files were created const repoMirrorDir = join(tempSourceDir, ".repomirror"); const stats = await fs.stat(repoMirrorDir); expect(stats.isDirectory()).toBe(true); // Check prompt.md const promptContent = await fs.readFile(join(repoMirrorDir, "prompt.md"), "utf8"); expect(promptContent).toBe(mockTransformationPrompt); // Check sync.sh const syncContent = await fs.readFile(join(repoMirrorDir, "sync.sh"), "utf8"); expect(syncContent).toContain("claude -p --output-format=stream-json"); expect(syncContent).toContain(tempTargetDir); // Check ralph.sh const ralphContent = await fs.readFile(join(repoMirrorDir, "ralph.sh"), "utf8"); expect(ralphContent).toContain("while 
:"); expect(ralphContent).toContain("./.repomirror/sync.sh"); // Check .gitignore const gitignoreContent = await fs.readFile(join(repoMirrorDir, ".gitignore"), "utf8"); expect(gitignoreContent).toBe("claude_output.jsonl\n"); // Check file permissions on scripts const syncStats = await fs.stat(join(repoMirrorDir, "sync.sh")); const ralphStats = await fs.stat(join(repoMirrorDir, "ralph.sh")); expect(syncStats.mode & 0o111).toBeTruthy(); // Executable expect(ralphStats.mode & 0o111).toBeTruthy(); // Executable // Verify success messages expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✅ repomirror initialized successfully!")); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Next steps:")); }); }); describe("preflight check failures", () => { beforeEach(() => { // Mock inquirer responses for all failure tests mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); }); it("should fail when target directory does not exist", async () => { const nonExistentDir = "/path/that/does/not/exist"; mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: nonExistentDir, }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(spinnerMock.fail).toHaveBeenCalledWith(expect.stringContaining("does not exist")); }); it("should fail when target directory is not a git repository", async () => { // Create directory but don't make it a git repo await fs.mkdir(tempTargetDir, { recursive: true }); mockExeca.mockRejectedValueOnce(new Error("Not a git repository")); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(mockExeca).toHaveBeenCalledWith("git", ["rev-parse", "--git-dir"], { cwd: tempTargetDir }); expect(spinnerMock.fail).toHaveBeenCalledWith(expect.stringContaining("is not a git repository")); }); it("should fail when target directory has no git remotes", async () => { await createMockGitRepo(tempTargetDir, false); mockExeca 
.mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse success .mockResolvedValueOnce({ stdout: "", exitCode: 0 }); // git remote -v empty await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(mockExeca).toHaveBeenCalledWith("git", ["remote", "-v"], { cwd: tempTargetDir }); expect(spinnerMock.fail).toHaveBeenCalledWith(expect.stringContaining("has no git remotes configured")); }); it("should fail when Claude Code is not configured", async () => { await createMockGitRepo(tempTargetDir, true); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse success .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)", exitCode: 0 }) // git remote -v success .mockRejectedValueOnce(new Error("claude command not found")); // claude test failure await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(mockExeca).toHaveBeenCalledWith("claude", ["-p", "say hi"], { timeout: 30000, input: "" }); expect(spinnerMock.fail).toHaveBeenCalledWith(expect.stringContaining("Claude Code is not properly configured")); }); it("should fail when Claude Code response is too short", async () => { await createMockGitRepo(tempTargetDir, true); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse success .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)", exitCode: 0 }) // git remote -v success .mockResolvedValueOnce({ stdout: "Hi", exitCode: 0 }); // claude test with response too short (< 10 chars) await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(spinnerMock.fail).toHaveBeenCalledWith(expect.stringContaining("response was empty or too short")); }); }); describe("Claude SDK integration", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); mockExeca 
.mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! How can I help you today?", exitCode: 0 }); // claude test }); it("should call Claude SDK with correct metaprompt", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); await init(); expect(mockClaudeQuery).toHaveBeenCalledWith({ prompt: expect.stringContaining("your task is to generate an optimized prompt for repo transformation"), }); const call = mockClaudeQuery.mock.calls[0][0]; expect(call.prompt).toContain("transform python to typescript"); expect(call.prompt).toContain(""); expect(call.prompt).toContain(""); }); it("should handle Claude SDK errors gracefully", async () => { mockClaudeQuery.mockImplementation(async function* () { throw new Error("Claude API error"); }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✖ Failed to generate transformation prompt")); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Claude API error")); }); it("should handle empty Claude SDK response", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: "", }; }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Failed to generate transformation prompt")); }); it("should replace placeholders in generated prompt", async () => { const promptWithPlaceholders = `Your job is to port [SOURCE PATH] to [TARGET PATH] and maintain the repository. 
Use the [TARGET_PATH]/agent/ directory as a scratchpad.`; mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: promptWithPlaceholders, }; }); await init(); // Check that placeholders were replaced in the prompt.md file const repoMirrorDir = join(tempSourceDir, ".repomirror"); const promptContent = await fs.readFile(join(repoMirrorDir, "prompt.md"), "utf8"); expect(promptContent).not.toContain("[SOURCE PATH]"); expect(promptContent).not.toContain("[TARGET PATH]"); expect(promptContent).not.toContain("[TARGET_PATH]"); expect(promptContent).toContain(mockInquirerResponses.sourceRepo); expect(promptContent).toContain(tempTargetDir); }); }); describe("file creation", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! 
How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); }); /* File-generation tests: each runs the real init() against temp dirs and asserts on the files it writes under .repomirror/ (prompt.md, sync.sh, ralph.sh, .gitignore), including the executable bits (0o111) on the shell scripts. */ it("should create .repomirror directory", async () => { await init(); const repoMirrorDir = join(tempSourceDir, ".repomirror"); const stats = await fs.stat(repoMirrorDir); expect(stats.isDirectory()).toBe(true); }); it("should create prompt.md with correct content", async () => { await init(); const promptPath = join(tempSourceDir, ".repomirror", "prompt.md"); const content = await fs.readFile(promptPath, "utf8"); expect(content).toBe(mockTransformationPrompt); }); it("should create executable sync.sh script", async () => { await init(); const syncPath = join(tempSourceDir, ".repomirror", "sync.sh"); const content = await fs.readFile(syncPath, "utf8"); const stats = await fs.stat(syncPath); expect(content).toContain("#!/bin/bash"); expect(content).toContain("cat .repomirror/prompt.md"); expect(content).toContain(`--add-dir ${tempTargetDir}`); expect(content).toContain("npx repomirror visualize --debug"); expect(stats.mode & 0o111).toBeTruthy(); // Check executable bit }); it("should create executable ralph.sh script", async () => { await init(); const ralphPath = join(tempSourceDir, ".repomirror", "ralph.sh"); const content = await fs.readFile(ralphPath, "utf8"); const stats = await fs.stat(ralphPath); expect(content).toContain("#!/bin/bash"); expect(content).toContain("while :"); expect(content).toContain("./.repomirror/sync.sh"); expect(content).toContain("sleep 10"); expect(stats.mode & 0o111).toBeTruthy(); // Check executable bit }); it("should create .gitignore file", async () => { await init(); const gitignorePath = join(tempSourceDir, ".repomirror", ".gitignore"); const content = await fs.readFile(gitignorePath, "utf8"); expect(content).toBe("claude_output.jsonl\n"); }); it("should handle file creation errors gracefully", async () => { // Mock fs.mkdir to fail
on the .repomirror directory creation // This happens in createRepoMirrorFiles which is inside the try-catch const mkdirSpy = vi.spyOn(fs, "mkdir"); let callCount = 0; mkdirSpy.mockImplementation(async (path, options) => { callCount++; // Let the first call (for config directory) succeed if (callCount === 1) { return Promise.resolve(undefined); } // Fail on the second call (for .repomirror directory) throw new Error("Permission denied"); }); await expect(init()).rejects.toThrow("Process exit called with code 1"); // Verify error message was logged expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✖ Failed to generate transformation prompt")); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Permission denied")); }); }); describe("user interaction", () => { it("should use default values for prompts", async () => { await createMockGitRepo(tempTargetDir, true); const basename = require("path").basename; // Setup mocks to use defaults mockInquirerPrompt.mockResolvedValue({ sourceRepo: "./", targetRepo: `../${basename(tempSourceDir)}-transformed`, transformationInstructions: "translate this python repo to typescript", }); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); // Mock directory access for the default target const defaultTarget = `../${basename(tempSourceDir)}-transformed`; vi.spyOn(fs, "access").mockImplementation(async (path) => { if (path === defaultTarget) { return Promise.resolve(); } // Allow access to template files if (typeof path === 'string' && path.includes('templates')) { return Promise.resolve(); } return Promise.reject(new Error("Not found")); }); // Mock readFile for templates vi.spyOn(fs, "readFile").mockImplementation(async (path, encoding) => { if (typeof path === 'string') { if (path.includes('sync.sh.template')) { return `#!/bin/bash cat .repomirror/prompt.md | \\ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir \${targetRepo} | \\ tee -a .repomirror/claude_output.jsonl | \\ npx repomirror visualize --debug;`; } if (path.includes('ralph.sh.template')) { return `#!/bin/bash while :; do ./.repomirror/sync.sh echo -e "===SLEEP===\\n===SLEEP===\\n"; echo 'looping'; sleep 10; done`; } if (path.includes('gitignore.template')) { return 'claude_output.jsonl'; } } return Promise.reject(new Error("File not found")); }); await init(); expect(mockInquirerPrompt).toHaveBeenCalledWith(expect.arrayContaining([ expect.objectContaining({ default: "./" }), expect.objectContaining({ default: expect.stringMatching(/-transformed$/) }), expect.objectContaining({ default: "translate this python repo to typescript" }) ])); }); it("should handle custom user responses", async () => { await createMockGitRepo(tempTargetDir, true); // Create a custom source directory for testing const customSourceDir = await createTempDir("custom-source-"); const customResponses = { sourceRepo: customSourceDir, targetRepo: tempTargetDir, transformationInstructions: "convert java to golang", };
mockInquirerPrompt.mockResolvedValue(customResponses); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: "Custom prompt with java to golang conversion", }; }); await init(); // Verify custom values were used in the Claude query expect(mockClaudeQuery).toHaveBeenCalledWith({ prompt: expect.stringContaining("convert java to golang"), }); // Check generated files contain custom values const syncPath = join(customSourceDir, ".repomirror", "sync.sh"); const syncContent = await fs.readFile(syncPath, "utf8"); expect(syncContent).toContain(tempTargetDir); // Clean up custom source dir await cleanupTempDir(customSourceDir); }); }); /* Failure paths: init() signals failure via process.exit(1), which the harness stubs to throw "Process exit called with code 1" so the exit is observable as a rejection. */ describe("error handling and exit codes", () => { it("should exit with code 1 on preflight check failure", async () => { mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: "/nonexistent", }); await expect(init()).rejects.toThrow("Process exit called with code 1"); }); it("should exit with code 1 on Claude SDK failure", async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { throw new Error("Claude API error"); }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Claude API error")); }); it("should handle unexpected errors gracefully", async () => { // Mock inquirer to throw an unexpected error mockInquirerPrompt.mockRejectedValue(new Error("Unexpected error")); await expect(init()).rejects.toThrow("Unexpected error"); }); }); describe("spinner and console output", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); }); it("should show appropriate spinner messages", async () => { await init(); // Check that individual spinners were created for each preflight check expect(mockOra).toHaveBeenCalledWith(expect.stringContaining("Accessing")); expect(mockOra).toHaveBeenCalledWith(expect.stringContaining("Verifying git repository")); expect(mockOra).toHaveBeenCalledWith(expect.stringContaining("Listing git remotes")); expect(mockOra).toHaveBeenCalledWith(" Running Claude Code test command"); // Check that spinners were started and succeeded (4 preflight checks only) expect(spinnerMock.start).toHaveBeenCalledTimes(4); // Check that console.log was called for generating prompt message expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Generating transformation prompt...")); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✔ Generated transformation prompt")); }); it("should show correct console output", async () => { await init(); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("I'll help you maintain a transformed copy of this repo:") ); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("✅ repomirror initialized successfully!") ); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("Next steps:") ); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("npx repomirror sync") ); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("npx repomirror sync-forever") ); }); }); describe("configuration file handling", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); // Reset mocks between tests in this describe block vi.clearAllMocks(); mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false,
result: mockTransformationPrompt, }; }); }); /* Config round-trip tests: init() persists the collected answers to repomirror.yaml, reloads an existing file as prompt defaults, and tolerates a corrupted file by falling back to fresh prompts. */ it("should create repomirror.yaml config file", async () => { mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! How can I help you today?", exitCode: 0 }); // claude test mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); await init(); const configPath = join(tempSourceDir, "repomirror.yaml"); const configExists = await fs.stat(configPath).then(() => true).catch(() => false); expect(configExists).toBe(true); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); expect(config).toEqual({ sourceRepo: mockInquirerResponses.sourceRepo, targetRepo: tempTargetDir, transformationInstructions: mockInquirerResponses.transformationInstructions, }); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✅ Saved configuration to repomirror.yaml")); }); it("should load existing repomirror.yaml as defaults", async () => { mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test // Create existing config file with the tempTargetDir that's already set up const existingConfig = { sourceRepo: "./existing-source", targetRepo: tempTargetDir, // Use the temp target dir from the test setup transformationInstructions: "existing transformation instructions", }; const yaml = await import("yaml"); const configContent = yaml.stringify(existingConfig); await fs.writeFile(join(tempSourceDir, "repomirror.yaml"), configContent, "utf-8"); // Mock inquirer to use defaults - return the existing config values // Since the existing config is loaded, inquirer should get these as defaults mockInquirerPrompt.mockResolvedValue({ sourceRepo: "./existing-source", targetRepo: tempTargetDir, transformationInstructions: "existing transformation instructions", }); await init(); // Verify the existing config message was shown expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("Found existing repomirror.yaml, using as defaults") ); // Verify the existing config was used expect(mockClaudeQuery).toHaveBeenCalledWith({ prompt: expect.stringContaining("existing transformation instructions"), }); // Verify the config file was updated with the same values const finalConfigContent = await fs.readFile(join(tempSourceDir, "repomirror.yaml"), "utf-8"); const finalConfig = yaml.parse(finalConfigContent); expect(finalConfig).toEqual(existingConfig); }); it("should handle corrupted repomirror.yaml gracefully", async () => { mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test // Create corrupted YAML file await fs.writeFile(join(tempSourceDir, "repomirror.yaml"), "invalid: yaml: content: [", "utf-8"); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); await init(); // Should not show the existing config message expect(consoleMock.log).not.toHaveBeenCalledWith( expect.stringContaining("Found existing repomirror.yaml, using as defaults") ); // Should create new valid config const configPath = join(tempSourceDir, "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); expect(config.sourceRepo).toBeDefined(); }); it("should save config with normalized paths", async () => { mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test const inputPaths = { sourceRepo: "./source/../source/./", targetRepo: "../target/./nested/../", transformationInstructions: "test transformation", }; mockInquirerPrompt.mockResolvedValue({ ...inputPaths, targetRepo: tempTargetDir, // Use valid temp dir for preflight checks }); await init(); // Config should be saved in the source subdirectory when sourceRepo is relative const configPath = join(tempSourceDir, "source/../source/./", "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); // Paths should be saved as entered (init doesn't normalize, that's the user's choice) expect(config.sourceRepo).toBe(inputPaths.sourceRepo); expect(config.transformationInstructions).toBe(inputPaths.transformationInstructions); }); }); describe("CLI flag overrides", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); }); /* CLI flags take precedence over both interactive prompts and an existing repomirror.yaml; a provided flag suppresses its prompt via inquirer's per-question when flag, and empty-string/undefined flags still allow prompting. */ it("should skip prompts when all CLI options provided", async () => { const cliOptions = { sourceRepo: "./cli-source", targetRepo: tempTargetDir, transformationInstructions: "CLI transformation instructions", }; // Mock inquirer to return empty object since all prompts have when: false mockInquirerPrompt.mockResolvedValue({}); await init(cliOptions); // Inquirer should be called but with all when: false conditions expect(mockInquirerPrompt).toHaveBeenCalled(); const promptCall = mockInquirerPrompt.mock.calls[0][0]; expect(promptCall.every((p: any) => p.when === false)).toBe(true); // Verify CLI options were used in Claude query expect(mockClaudeQuery).toHaveBeenCalledWith({ prompt: expect.stringContaining("CLI transformation instructions"), }); // Verify config was saved with CLI values // Config should be saved in the cli-source subdirectory const configPath = join(tempSourceDir, "cli-source", "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); expect(config).toEqual(cliOptions); }); it("should partially override with CLI flags", async () => { const cliOptions = { sourceRepo: "./cli-source", // targetRepo and transformationInstructions will come from prompt }; mockInquirerPrompt.mockResolvedValue({ targetRepo: tempTargetDir, transformationInstructions: "prompted instructions", }); await init(cliOptions); // Should only prompt for missing options const promptCall = mockInquirerPrompt.mock.calls[0][0]; const sourceRepoPrompt = promptCall.find((p: any) => p.name === "sourceRepo"); const targetRepoPrompt = promptCall.find((p: any) => p.name === "targetRepo"); const instructionsPrompt = promptCall.find((p: any) => p.name === "transformationInstructions");
expect(sourceRepoPrompt.when).toBe(false); // Should skip source repo prompt expect(targetRepoPrompt.when).toBe(true); // Should show target repo prompt expect(instructionsPrompt.when).toBe(true); // Should show instructions prompt // Verify final config contains CLI override // Config should be saved in the cli-source subdirectory const configPath = join(tempSourceDir, "cli-source", "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); expect(config.sourceRepo).toBe("./cli-source"); expect(config.targetRepo).toBe(tempTargetDir); expect(config.transformationInstructions).toBe("prompted instructions"); }); it("should prioritize CLI flags over existing config", async () => { // Create existing config const existingConfig = { sourceRepo: "./existing-source", targetRepo: tempTargetDir, // Use the temp dir for preflight checks transformationInstructions: "existing instructions", }; const yaml = await import("yaml"); const configContent = yaml.stringify(existingConfig); await fs.writeFile(join(tempSourceDir, "repomirror.yaml"), configContent, "utf-8"); // CLI overrides part of the config const cliOptions = { targetRepo: tempTargetDir, transformationInstructions: "CLI override instructions", }; // Mock inquirer to return the sourceRepo value since it's the only one not overridden by CLI mockInquirerPrompt.mockResolvedValue({ sourceRepo: "./existing-source", // Only sourceRepo should be prompted since it's not in CLI options }); await init(cliOptions); // Verify CLI overrides were used expect(mockClaudeQuery).toHaveBeenCalledWith({ prompt: expect.stringContaining("CLI override instructions"), }); // Verify final config has CLI overrides // Config gets saved to the final sourceRepo location const finalConfigContent = await fs.readFile(join(tempSourceDir, "existing-source", "repomirror.yaml"), "utf-8"); const finalConfig = yaml.parse(finalConfigContent);
expect(finalConfig.sourceRepo).toBe("./existing-source"); // From existing config (no CLI override) expect(finalConfig.targetRepo).toBe(tempTargetDir); // CLI override expect(finalConfig.transformationInstructions).toBe("CLI override instructions"); // CLI override }); it("should handle CLI flags with empty values", async () => { const cliOptions = { sourceRepo: "", targetRepo: tempTargetDir, transformationInstructions: undefined, }; mockInquirerPrompt.mockResolvedValue({ sourceRepo: "./prompted-source", transformationInstructions: "prompted instructions", }); await init(cliOptions); // Empty CLI values should not prevent prompting const promptCall = mockInquirerPrompt.mock.calls[0][0]; const sourceRepoPrompt = promptCall.find((p: any) => p.name === "sourceRepo"); const instructionsPrompt = promptCall.find((p: any) => p.name === "transformationInstructions"); expect(sourceRepoPrompt.when).toBe(true); // Empty string should allow prompting expect(instructionsPrompt.when).toBe(true); // undefined should allow prompting }); }); describe("Claude SDK async iterator edge cases", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there!
How can I help you today?", exitCode: 0 }); // claude test }); /* init() consumes the Claude SDK async iterator until the first message with type "result"; these cases cover non-result messages, error results, missing result fields, iterators that never yield a result, and iterators that throw. */ it("should handle Claude SDK yielding multiple messages before result", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "other", data: "some data" }; yield { type: "progress", percentage: 50 }; yield { type: "result", is_error: false, result: mockTransformationPrompt, }; yield { type: "after_result", data: "ignored" }; // Should be ignored after break }); await init(); // Should successfully create files with the result const promptContent = await fs.readFile(join(tempSourceDir, ".repomirror", "prompt.md"), "utf8"); expect(promptContent).toBe(mockTransformationPrompt); }); it("should handle Claude SDK yielding error result", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: true, result: "Claude API returned an error", }; }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✖ Failed to generate transformation prompt")); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Claude API returned an error")); }); it("should handle Claude SDK yielding result with missing result field", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, // Missing result field - result property is undefined } as any; }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Failed to generate transformation prompt")); }); it("should handle Claude SDK iterator that never yields a result", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "other", data: "some data" }; yield { type: "progress", percentage: 100 }; // Never yields a result type }); await expect(init()).rejects.toThrow("Process exit called with code 1");
expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Failed to generate transformation prompt - no result received") ); }); it("should handle Claude SDK network timeout gracefully", async () => { mockClaudeQuery.mockImplementation(async function* () { await new Promise(resolve => setTimeout(resolve, 10)); // Small delay throw new Error("Network timeout"); }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("✖ Failed to generate transformation prompt")); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Network timeout")); }); it("should handle Claude SDK iterator throwing on first yield", async () => { let hasYielded = false; mockClaudeQuery.mockImplementation(async function* () { if (!hasYielded) { hasYielded = true; throw new Error("Iterator initialization failed"); } }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith(expect.stringContaining("Iterator initialization failed")); }); it("should handle partial Claude SDK response objects", async () => { mockClaudeQuery.mockImplementation(async function* () { yield { type: "progress" }; // Missing other fields yield { type: "other", data: "some data" }; // Different type yield { type: "status", status: "processing" }; // Different type yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); await init(); // Should handle partial responses gracefully and use the valid result const promptContent = await fs.readFile(join(tempSourceDir, ".repomirror", "prompt.md"), "utf8"); expect(promptContent).toBe(mockTransformationPrompt); }); }); describe("path resolution and normalization edge cases", () => { beforeEach(async () => { mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git
(fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); }); /* Path-handling tests: paths are used exactly as entered (absolute, relative with dot segments, containing spaces, very long); the assertions below show init() does not normalize them. */ it("should handle absolute paths correctly", async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ sourceRepo: tempSourceDir, // absolute path targetRepo: tempTargetDir, // absolute path transformationInstructions: "test transformation", }); await init(); // Check sync.sh contains the absolute path const syncContent = await fs.readFile(join(tempSourceDir, ".repomirror", "sync.sh"), "utf8"); expect(syncContent).toContain(`--add-dir ${tempTargetDir}`); }); it("should handle relative paths with dots and slashes", async () => { await createMockGitRepo(tempTargetDir, true); const relativePaths = { sourceRepo: "./src/../src/./", targetRepo: tempTargetDir, // Use real path for preflight checks transformationInstructions: "test transformation", }; mockInquirerPrompt.mockResolvedValue(relativePaths); await init(); // Check that paths are preserved as entered in the config // Config should be saved in the src/../src/./ subdirectory const configPath = join(tempSourceDir, "src/../src/./", "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); expect(config.sourceRepo).toBe("./src/../src/./"); }); it("should handle paths with spaces", async () => { // Create temp dir with spaces const tempDirWithSpaces = await createTempDir("repo mirror test "); await createMockGitRepo(tempDirWithSpaces, true); try { mockInquirerPrompt.mockResolvedValue({ sourceRepo: "./source with spaces", targetRepo: tempDirWithSpaces, transformationInstructions: "test transformation", }); await init(); // Check sync.sh properly
handles the path with spaces // Files should be in the "source with spaces" subdirectory const syncContent = await fs.readFile(join(tempSourceDir, "source with spaces", ".repomirror", "sync.sh"), "utf8"); expect(syncContent).toContain(`--add-dir ${tempDirWithSpaces}`); } finally { await cleanupTempDir(tempDirWithSpaces); } }); it("should handle empty and invalid path values", async () => { mockInquirerPrompt.mockResolvedValue({ sourceRepo: "", targetRepo: "/invalid/nonexistent/path", transformationInstructions: "test transformation", }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(spinnerMock.fail).toHaveBeenCalledWith( expect.stringContaining("does not exist") ); }); it("should handle very long paths", async () => { const longDirName = "a".repeat(100); const tempLongDir = await createTempDir(`repomirror-long-${longDirName}-`); await createMockGitRepo(tempLongDir, true); try { mockInquirerPrompt.mockResolvedValue({ sourceRepo: "./" + "nested/".repeat(20), targetRepo: tempLongDir, transformationInstructions: "test transformation", }); await init(); // Should handle long paths without issue // Config should be saved in the nested subdirectory const configPath = join(tempSourceDir, "."
+ "/nested".repeat(20), "repomirror.yaml"); const configContent = await fs.readFile(configPath, "utf-8"); const yaml = await import("yaml"); const config = yaml.parse(configContent); expect(config.targetRepo).toBe(tempLongDir); } finally { await cleanupTempDir(tempLongDir); } }); }); describe("script generation edge cases", () => { beforeEach(async () => { await createMockGitRepo(tempTargetDir, true); mockInquirerPrompt.mockResolvedValue({ ...mockInquirerResponses, targetRepo: tempTargetDir, }); mockExeca .mockResolvedValueOnce({ stdout: ".git", exitCode: 0 }) // git rev-parse .mockResolvedValueOnce({ stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }) // git remote -v .mockResolvedValueOnce({ stdout: "Hi there! How can I help you today?", exitCode: 0 }); // claude test mockClaudeQuery.mockImplementation(async function* () { yield { type: "result", is_error: false, result: mockTransformationPrompt, }; }); }); it("should create scripts with exact file permissions", async () => { await init(); const syncPath = join(tempSourceDir, ".repomirror", "sync.sh"); const ralphPath = join(tempSourceDir, ".repomirror", "ralph.sh"); const syncStats = await fs.stat(syncPath); const ralphStats = await fs.stat(ralphPath); // Check exact mode (should be 0o755) expect(syncStats.mode & 0o777).toBe(0o755); expect(ralphStats.mode & 0o777).toBe(0o755); // Check owner permissions expect(syncStats.mode & 0o700).toBe(0o700); // rwx for owner expect(ralphStats.mode & 0o700).toBe(0o700); // rwx for owner }); it("should generate sync.sh with proper bash escaping", async () => { await init(); const syncContent = await fs.readFile(join(tempSourceDir, ".repomirror", "sync.sh"), "utf8"); // Check shebang expect(syncContent.startsWith("#!/bin/bash")).toBe(true); // Check line continuation expect(syncContent).toContain(" | \\"); // Check command structure expect(syncContent).toContain("cat .repomirror/prompt.md");
expect(syncContent).toContain("claude -p --output-format=stream-json"); expect(syncContent).toContain("--verbose --dangerously-skip-permissions"); expect(syncContent).toContain("tee -a .repomirror/claude_output.jsonl"); expect(syncContent).toContain("npx repomirror visualize --debug"); }); /* ralph.sh is asserted to wrap sync.sh in an infinite while-loop with a 10s sleep between iterations. */ it("should generate ralph.sh with proper loop structure", async () => { await init(); const ralphContent = await fs.readFile(join(tempSourceDir, ".repomirror", "ralph.sh"), "utf8"); // Check shebang expect(ralphContent.startsWith("#!/bin/bash")).toBe(true); // Check loop structure expect(ralphContent).toContain("while :; do"); expect(ralphContent).toContain("./.repomirror/sync.sh"); expect(ralphContent).toContain("echo -e \"===SLEEP===\\n===SLEEP===\\n\"; echo 'looping';"); expect(ralphContent).toContain("sleep 10;"); expect(ralphContent).toContain("done"); }); it("should handle file creation permission errors", async () => { // Mock writeFile to fail on script creation const originalWriteFile = fs.writeFile; const writeFileSpy = vi.spyOn(fs, "writeFile").mockImplementation(async (path, content, options) => { if (typeof path === 'string' && (path.endsWith('sync.sh') || path.endsWith('ralph.sh'))) { throw new Error("Permission denied: Cannot create executable file"); } return originalWriteFile(path as any, content, options); }); await expect(init()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Permission denied: Cannot create executable file") ); writeFileSpy.mockRestore(); }); it("should create gitignore with correct content and no extra whitespace", async () => { await init(); const gitignoreContent = await fs.readFile(join(tempSourceDir, ".repomirror", ".gitignore"), "utf8"); // Check exact content expect(gitignoreContent).toBe("claude_output.jsonl\n"); // Verify no extra whitespace expect(gitignoreContent.trim()).toBe("claude_output.jsonl"); expect(gitignoreContent.split('\n')).toHaveLength(2); //
content + empty line }); }); }); ================================================ FILE: tests/commands/pull.test.ts ================================================ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { promises as fs } from "fs"; import { execa } from "execa"; import { pull } from "../../src/commands/pull"; // Mock dependencies vi.mock("fs", () => ({ promises: { readFile: vi.fn(), access: vi.fn(), }, })); vi.mock("execa"); vi.mock("chalk", () => ({ default: { red: vi.fn((text) => text), green: vi.fn((text) => text), yellow: vi.fn((text) => text), cyan: vi.fn((text) => text), gray: vi.fn((text) => text), blue: vi.fn((text) => text), }, })); vi.mock("ora", () => ({ default: vi.fn(() => ({ start: vi.fn().mockReturnThis(), succeed: vi.fn().mockReturnThis(), fail: vi.fn().mockReturnThis(), warn: vi.fn().mockReturnThis(), })), })); // Mock console methods const mockConsoleError = vi.spyOn(console, "error").mockImplementation(() => {}); const mockProcessExit = vi.spyOn(process, "exit").mockImplementation((code?: number) => { throw new Error(`process.exit unexpectedly called with "${code}"`); return undefined as never; }); /* pull command tests: fs/execa/chalk/ora are fully mocked; process.exit is stubbed to throw (the unreachable return satisfies the `never` return type) so exit paths surface as rejections. */ describe("pull command", () => { beforeEach(() => { vi.clearAllMocks(); }); afterEach(() => { vi.restoreAllMocks(); }); describe("basic functionality", () => { it("should exit with error when repomirror.yaml not found", async () => { vi.mocked(fs.readFile).mockRejectedValue(new Error("ENOENT")); await expect(() => pull()).rejects.toThrow("process.exit unexpectedly called with \"1\""); }); it("should exit with error when source directory does not exist", async () => { vi.mocked(fs.readFile).mockResolvedValue(` sourceRepo: ./nonexistent targetRepo: ../target transformationInstructions: test transformation pull: source_remote: upstream source_branch: main`); vi.mocked(fs.access).mockRejectedValue(new Error("ENOENT")); await expect(() => pull()).rejects.toThrow("process.exit unexpectedly called with \"1\""); });
/* Happy-path pull flows: each test scripts the exact sequence of git commands pull() issues (rev-parse, branch, remote, status, fetch, rev-list, log, and optionally pull / bash sync.sh) via chained execa mocks, then asserts on which commands were or were not invoked. */ it("should trigger sync when auto_sync is enabled and changes are pulled", async () => { const config = ` sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation pull: source_remote: upstream source_branch: main auto_sync: true`; vi.mocked(fs.readFile).mockResolvedValue(config); vi.mocked(fs.access).mockResolvedValue(undefined); vi.mocked(execa) .mockResolvedValueOnce({ stdout: ".git", stderr: "" } as any) // git rev-parse --git-dir .mockResolvedValueOnce({ stdout: "main", stderr: "" } as any) // git branch --show-current .mockResolvedValueOnce({ stdout: "origin\nupstream", stderr: "" } as any) // git remote .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git status --porcelain .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git fetch upstream .mockResolvedValueOnce({ stdout: "1", stderr: "" } as any) // git rev-list --count HEAD..upstream/main .mockResolvedValueOnce({ stdout: "abc123f New feature", stderr: "" } as any) // git log --oneline .mockResolvedValueOnce({ stdout: "Updated 1 file", stderr: "" } as any) // git pull upstream main .mockResolvedValueOnce({ stdout: "Sync completed", stderr: "" } as any); // bash sync.sh await expect(pull()).resolves.toBeUndefined(); expect(vi.mocked(execa)).toHaveBeenCalledWith( "bash", expect.arrayContaining([expect.stringContaining("sync.sh")]), expect.objectContaining({ stdio: "inherit" }) ); }); it("should handle --check option without pulling", async () => { const config = ` sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation pull: source_remote: upstream source_branch: main`; vi.mocked(fs.readFile).mockResolvedValue(config); vi.mocked(fs.access).mockResolvedValue(undefined); vi.mocked(execa) .mockResolvedValueOnce({ stdout: ".git", stderr: "" } as any) // git rev-parse --git-dir .mockResolvedValueOnce({ stdout: "main", stderr: "" } as any) // git branch --show-current .mockResolvedValueOnce({ stdout: "origin\nupstream", stderr:
"" } as any) // git remote .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git status --porcelain .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git fetch upstream .mockResolvedValueOnce({ stdout: "2", stderr: "" } as any) // git rev-list --count HEAD..upstream/main .mockResolvedValueOnce({ stdout: "abc123f Fix bug\ndef456a Update docs", stderr: "" } as any); // git log --oneline await expect(pull({ check: true })).resolves.toBeUndefined(); // Should not attempt to pull expect(vi.mocked(execa)).not.toHaveBeenCalledWith( expect.anything(), expect.arrayContaining(["pull"]), expect.anything() ); }); it("should skip sync with --source-only option", async () => { const config = ` sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation pull: source_remote: upstream source_branch: main`; vi.mocked(fs.readFile).mockResolvedValue(config); vi.mocked(fs.access).mockResolvedValue(undefined); vi.mocked(execa) .mockResolvedValueOnce({ stdout: ".git", stderr: "" } as any) // git rev-parse --git-dir .mockResolvedValueOnce({ stdout: "main", stderr: "" } as any) // git branch --show-current .mockResolvedValueOnce({ stdout: "origin\nupstream", stderr: "" } as any) // git remote .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git status --porcelain .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git fetch upstream .mockResolvedValueOnce({ stdout: "1", stderr: "" } as any) // git rev-list --count HEAD..upstream/main .mockResolvedValueOnce({ stdout: "abc123f New feature", stderr: "" } as any) // git log --oneline .mockResolvedValueOnce({ stdout: "Updated 1 file", stderr: "" } as any); // git pull upstream main await expect(pull({ sourceOnly: true })).resolves.toBeUndefined(); // Should not attempt to run sync scripts expect(vi.mocked(execa)).not.toHaveBeenCalledWith( "bash", expect.arrayContaining([expect.stringContaining("sync.sh")]), expect.anything() ); }); }); });
================================================ FILE: tests/commands/push.test.ts ================================================
// Unit tests for the `push` command: config loading, change detection,
// commit/push flow, multi-remote push, dry-run, and auth-failure handling.
// Same mocking strategy as pull.test.ts: queued execa results in git call order.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { promises as fs } from "fs";
import { execa } from "execa";
import chalk from "chalk";
import { push } from "../../src/commands/push";

// Mock dependencies
vi.mock("fs", () => ({
  promises: {
    readFile: vi.fn(),
    access: vi.fn(),
  },
}));
vi.mock("execa");
// chalk colors become identity functions so assertions can match plain text.
vi.mock("chalk", () => ({
  default: {
    red: vi.fn((text) => text),
    green: vi.fn((text) => text),
    yellow: vi.fn((text) => text),
    cyan: vi.fn((text) => text),
    gray: vi.fn((text) => text),
  },
}));
vi.mock("ora", () => ({
  default: vi.fn(() => ({
    start: vi.fn().mockReturnThis(),
    succeed: vi.fn().mockReturnThis(),
    fail: vi.fn().mockReturnThis(),
    warn: vi.fn().mockReturnThis(),
  })),
}));

// Mock console methods
const mockConsoleLog = vi.spyOn(console, "log").mockImplementation(() => {});
const mockConsoleError = vi.spyOn(console, "error").mockImplementation(() => {});
// process.exit is stubbed to throw, so exit codes surface as assertable errors.
const mockProcessExit = vi.spyOn(process, "exit").mockImplementation((code?: number) => {
  throw new Error(`process.exit unexpectedly called with "${code}"`);
  return undefined as never; // unreachable; satisfies the `never` return type
});

describe("push command", () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  describe("configuration loading", () => {
    it("should exit with error when repomirror.yaml not found", async () => {
      // Mock file not found
      vi.mocked(fs.readFile).mockRejectedValue(new Error("ENOENT"));
      await expect(() => push()).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });
    it("should exit with error when no remotes configured", async () => {
      vi.mocked(fs.readFile).mockResolvedValue("sourceRepo: ./src\ntargetRepo: ../target\ntransformationInstructions: test transformation\nremotes: {}");
      await expect(() => push()).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });
    it("should handle no changes to commit gracefully", async () => {
      // NOTE(review): the YAML config string was flattened onto one line by
      // extraction; original was presumably a multi-line template literal.
      vi.mocked(fs.readFile).mockResolvedValue(
        `sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation remotes: origin: url: https://github.com/test/repo.git branch: main auto_push: false push: default_remote: origin default_branch: main commit_prefix: "[repomirror]"`
      );
      // Mock target directory exists and is a git repo
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // All three change-detection commands report nothing to commit.
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: ".git", stderr: "" } as any) // git rev-parse --git-dir
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git diff --cached --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git diff --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any); // git ls-files --others --exclude-standard
      // Should complete successfully without errors
      await expect(push()).resolves.toBeUndefined();
    });
  });
  describe("git operations", () => {
    beforeEach(() => {
      const config = `sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation remotes: origin: url: https://github.com/test/repo.git branch: main auto_push: false push: default_remote: origin default_branch: main commit_prefix: "[repomirror]"`;
      vi.mocked(fs.readFile).mockResolvedValue(config);
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // First git call (repo check) is queued here; each test queues the rest.
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: ".git", stderr: "" } as any); // git rev-parse --git-dir
    });
    it("should detect changes and create commit", async () => {
      // Mock git status showing changes
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: "file1.txt", stderr: "" } as any) // git diff --cached --name-only
        .mockResolvedValueOnce({ stdout: "file2.txt", stderr: "" } as any) // git diff --name-only
        .mockResolvedValueOnce({ stdout: "file3.txt", stderr: "" } as any) // git ls-files --others --exclude-standard
        .mockResolvedValueOnce({ stdout: "abc123f", stderr: "" } as any) // git rev-parse HEAD (source)
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git add .
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git commit -m
        .mockResolvedValueOnce({ stdout: "success", stderr: "" } as any); // git push
      await push();
      // Commit message carries the configured commit_prefix.
      expect(vi.mocked(execa)).toHaveBeenCalledWith(
        "git",
        ["commit", "-m", expect.stringContaining("[repomirror]")],
        { cwd: "../target" }
      );
      expect(vi.mocked(execa)).toHaveBeenCalledWith(
        "git",
        ["push", "origin", "main"],
        { cwd: "../target", timeout: 60000 }
      );
    });
    it("should handle push to all remotes", async () => {
      const config = `sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation remotes: origin: url: https://github.com/test/repo.git branch: main staging: url: https://github.com/test/staging.git branch: develop push: default_remote: origin`;
      vi.mocked(fs.readFile).mockResolvedValue(config);
      // Mock git status showing changes
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: "file1.txt", stderr: "" } as any) // git diff --cached --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git diff --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git ls-files --others --exclude-standard
        .mockResolvedValueOnce({ stdout: "abc123f", stderr: "" } as any) // git rev-parse HEAD (source)
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git add .
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git commit -m
        .mockResolvedValueOnce({ stdout: "success", stderr: "" } as any) // git push origin main
        .mockResolvedValueOnce({ stdout: "success", stderr: "" } as any); // git push staging develop
      await push({ all: true });
      // Both configured remotes receive a push on their configured branch.
      expect(vi.mocked(execa)).toHaveBeenCalledWith(
        "git",
        ["push", "origin", "main"],
        { cwd: "../target", timeout: 60000 }
      );
      expect(vi.mocked(execa)).toHaveBeenCalledWith(
        "git",
        ["push", "staging", "develop"],
        { cwd: "../target", timeout: 60000 }
      );
    });
    it("should perform dry run without committing", async () => {
      // Mock git status showing changes
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: "file1.txt", stderr: "" } as any) // git diff --cached --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git diff --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git ls-files --others --exclude-standard
        .mockResolvedValueOnce({ stdout: "abc123f", stderr: "" } as any) // git rev-parse HEAD (source)
        .mockResolvedValueOnce({ stdout: "dry-run output", stderr: "" } as any); // git push --dry-run
      await push({ dryRun: true });
      // Should not call git add or git commit
      expect(vi.mocked(execa)).not.toHaveBeenCalledWith(
        "git",
        ["add", "."],
        expect.any(Object)
      );
      expect(vi.mocked(execa)).not.toHaveBeenCalledWith(
        "git",
        expect.arrayContaining(["commit"]),
        expect.any(Object)
      );
      // Should call git push --dry-run
      expect(vi.mocked(execa)).toHaveBeenCalledWith(
        "git",
        ["push", "--dry-run", "origin", "main"],
        { cwd: "../target", timeout: 60000 }
      );
    });
  });
  describe("error handling", () => {
    it("should handle authentication errors gracefully", async () => {
      const config = `sourceRepo: ./src targetRepo: ../target transformationInstructions: test transformation remotes: origin: url: https://github.com/test/repo.git branch: main push: default_remote: origin`;
      vi.mocked(fs.readFile).mockResolvedValue(config);
      vi.mocked(fs.access).mockResolvedValue(undefined);
      // Mock
// successful initial checks but failed push with auth error
      vi.mocked(execa)
        .mockResolvedValueOnce({ stdout: ".git", stderr: "" } as any) // git rev-parse --git-dir
        .mockResolvedValueOnce({ stdout: "file1.txt", stderr: "" } as any) // git diff --cached --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git diff --name-only
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git ls-files --others --exclude-standard
        .mockResolvedValueOnce({ stdout: "abc123f", stderr: "" } as any) // git rev-parse HEAD (source)
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git add .
        .mockResolvedValueOnce({ stdout: "", stderr: "" } as any) // git commit -m
        .mockRejectedValueOnce(new Error("authentication failed")); // git push fails
      await expect(() => push()).rejects.toThrow("process.exit unexpectedly called with \"1\"");
    });
  });
});
================================================ FILE: tests/commands/setup-github-pr-sync.test.ts ================================================
// Tests for setup-github-pr-sync: repomirror.yaml validation, GitHub Actions
// workflow generation, overwrite protection, and defaults from existing config.
// Uses real temp directories on disk plus mocked inquirer/ora.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { promises as fs } from "fs";
import { join } from "path";
import yaml from "yaml";
import { createTempDir, cleanupTempDir, mockConsole, mockProcess } from "../helpers";

// Mock external dependencies
const mockInquirerPrompt = vi.fn();
const mockOra = vi.fn();
vi.mock("inquirer", () => ({
  default: {
    prompt: mockInquirerPrompt,
  },
}));
vi.mock("ora", () => ({
  default: mockOra,
}));

// Import after mocking
const { setupGithubPrSync } = await import("../../src/commands/setup-github-pr-sync");

describe("setup-github-pr-sync command", () => {
  let tempDir: string;
  // NOTE(review): generic type arguments appear stripped by extraction here —
  // presumably ReturnType<typeof mockConsole> / ReturnType<typeof mockProcess>;
  // confirm against the repository.
  let consoleMock: ReturnType;
  let processMock: ReturnType;
  let spinnerMock: any;
  beforeEach(async () => {
    tempDir = await createTempDir("repomirror-setup-pr-sync-");
    consoleMock = mockConsole();
    processMock = mockProcess(true);
    processMock.cwd.mockReturnValue(tempDir);
    spinnerMock = {
      start: vi.fn().mockReturnThis(),
      succeed: vi.fn().mockReturnThis(),
      fail: vi.fn().mockReturnThis(),
    };
    mockOra.mockReturnValue(spinnerMock);
    vi.clearAllMocks();
  });
  afterEach(async () => {
    await cleanupTempDir(tempDir);
    vi.restoreAllMocks();
  });
  describe("configuration validation", () => {
    it("should error if repomirror.yaml doesn't exist", async () => {
      await expect(setupGithubPrSync()).rejects.toThrow("Process exit called with code 1");
      expect(processMock.exit).toHaveBeenCalledWith(1);
    });
    it("should read existing configuration successfully", async () => {
      // Create a valid repomirror.yaml
      const config = {
        sourceRepo: "./",
        targetRepo: "../test-transformed",
        transformationInstructions: "convert to typescript",
      };
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify(config)
      );
      mockInquirerPrompt.mockResolvedValue({
        targetRepo: "myorg/myrepo",
        timesToLoop: 3,
      });
      await setupGithubPrSync();
      // Verify workflow was created
      const workflowPath = join(tempDir, ".github", "workflows", "repomirror.yml");
      const workflowExists = await fs.access(workflowPath).then(() => true).catch(() => false);
      expect(workflowExists).toBe(true);
    });
  });
  describe("workflow generation", () => {
    beforeEach(async () => {
      // Create a valid repomirror.yaml
      const config = {
        sourceRepo: "./",
        targetRepo: "../test-transformed",
        transformationInstructions: "convert to typescript",
      };
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify(config)
      );
    });
    it("should create workflow with correct content", async () => {
      mockInquirerPrompt.mockResolvedValue({
        targetRepo: "myorg/myrepo",
        timesToLoop: 5,
      });
      await setupGithubPrSync();
      const workflowPath = join(tempDir, ".github", "workflows", "repomirror.yml");
      const workflowContent = await fs.readFile(workflowPath, "utf-8");
      // The generated workflow embeds the target repo, the loop count, a
      // manual trigger, the API key secret, and the sync-one invocation.
      expect(workflowContent).toContain("repository: myorg/myrepo");
      expect(workflowContent).toContain("for i in $(seq 1 5);");
      expect(workflowContent).toContain("workflow_dispatch:");
      expect(workflowContent).toContain("ANTHROPIC_API_KEY");
      expect(workflowContent).toContain("npx repomirror sync-one --auto-push");
    });
    it("should handle CLI options", async () => {
      // Options passed directly skip the interactive prompts.
      await setupGithubPrSync({
        targetRepo: "testorg/testrepo",
        timesToLoop: 2,
      });
      const workflowPath = join(tempDir, ".github", "workflows", "repomirror.yml");
      const workflowContent = await fs.readFile(workflowPath, "utf-8");
      expect(workflowContent).toContain("repository: testorg/testrepo");
      expect(workflowContent).toContain("for i in $(seq 1 2);");
    });
    it("should update repomirror.yaml with github-pr-sync settings", async () => {
      mockInquirerPrompt.mockResolvedValue({
        targetRepo: "myorg/myrepo",
        timesToLoop: 3,
      });
      await setupGithubPrSync();
      const configPath = join(tempDir, "repomirror.yaml");
      const configContent = await fs.readFile(configPath, "utf-8");
      const config = yaml.parse(configContent);
      expect(config["github-pr-sync"]).toEqual({
        targetRepo: "myorg/myrepo",
        timesToLoop: 3,
      });
    });
  });
  describe("overwrite protection", () => {
    beforeEach(async () => {
      // Create a valid repomirror.yaml
      const config = {
        sourceRepo: "./",
        targetRepo: "../test-transformed",
        transformationInstructions: "convert to typescript",
      };
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify(config)
      );
      // Create existing workflow
      const workflowDir = join(tempDir, ".github", "workflows");
      await fs.mkdir(workflowDir, { recursive: true });
      await fs.writeFile(join(workflowDir, "repomirror.yml"), "existing content");
    });
    it("should prompt before overwriting existing workflow", async () => {
      // Declining the overwrite prompt exits with code 0 (not an error).
      mockInquirerPrompt
        .mockResolvedValueOnce({ shouldOverwrite: false })
        .mockResolvedValueOnce({
          targetRepo: "myorg/myrepo",
          timesToLoop: 3,
        });
      await expect(setupGithubPrSync()).rejects.toThrow("Process exit called with code 0");
      expect(processMock.exit).toHaveBeenCalledWith(0);
    });
    it("should overwrite when --overwrite flag is used", async () => {
      mockInquirerPrompt.mockResolvedValue({
        targetRepo: "myorg/myrepo",
        timesToLoop: 3,
      });
      await setupGithubPrSync({ overwrite: true });
      const workflowPath = join(tempDir, ".github", "workflows", "repomirror.yml");
      const workflowContent = await fs.readFile(workflowPath, "utf-8");
      expect(workflowContent).toContain("repository: myorg/myrepo");
      expect(workflowContent).not.toBe("existing content");
    });
  });
  describe("defaults from existing config", () => {
    it("should use existing github-pr-sync settings as defaults", async () => {
      const config = {
        sourceRepo: "./",
        targetRepo: "../test-transformed",
        transformationInstructions: "convert to typescript",
        "github-pr-sync": {
          targetRepo: "existing/repo",
          timesToLoop: 4,
        },
      };
      await fs.writeFile(
        join(tempDir, "repomirror.yaml"),
        yaml.stringify(config)
      );
      // Mock prompts to use defaults
      mockInquirerPrompt.mockResolvedValue({
        targetRepo: "existing/repo", // Should use existing default
        timesToLoop: 4, // Should use existing default
      });
      await setupGithubPrSync();
      const workflowPath = join(tempDir, ".github", "workflows", "repomirror.yml");
      const workflowContent = await fs.readFile(workflowPath, "utf-8");
      expect(workflowContent).toContain("repository: existing/repo");
      expect(workflowContent).toContain("for i in $(seq 1 4);");
    });
  });
});
================================================ FILE: tests/commands/simple.test.ts ================================================
// Smoke tests for the shared test utilities themselves.
import { describe, it, expect, vi } from "vitest";
import { createTempDir, cleanupTempDir, mockConsole } from "../helpers/test-utils";

describe("command utilities", () => {
  it("should create and cleanup temporary directories", async () => {
    const tempDir = await createTempDir("test-");
    expect(tempDir).toMatch(/test-/);
    // Directory should exist
    const fs = await import("fs");
    const stats = await fs.promises.stat(tempDir);
    expect(stats.isDirectory()).toBe(true);
    // Cleanup should work
    await cleanupTempDir(tempDir);
  });
  it("should mock console methods", () => {
    const consoleMock = mockConsole();
    console.log("test message");
    console.error("error message");
    expect(consoleMock.log).toHaveBeenCalledWith("test message");
    expect(consoleMock.error).toHaveBeenCalledWith("error message");
    vi.restoreAllMocks();
  });
  it("should work with TypeScript imports", async () => {
    // Test that we can import from source with TypeScript
    const { basename } = await import("path");
    const result = basename("/some/path/file.txt");
    expect(result).toBe("file.txt");
  });
});
================================================ FILE: tests/commands/sync-forever.test.ts ================================================
// Tests for sync-forever: ralph.sh existence checks, execution via bash with
// inherited stdio, SIGINT handling, and error paths.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { promises as fs } from "fs";
import { join } from "path";
import {
  createTempDir,
  cleanupTempDir,
  mockConsole,
  mockProcess,
  createMockFileStructure,
} from "../helpers/test-utils";

// Mock external dependencies at module level
const mockExeca = vi.fn();
vi.mock("execa", () => ({
  execa: mockExeca,
}));

// Import the module after mocking
const { syncForever } = await import("../../src/commands/sync-forever");

describe("sync-forever command", () => {
  let tempDir: string;
  // NOTE(review): generic type arguments appear stripped by extraction here —
  // presumably ReturnType<typeof mockConsole> / ReturnType<typeof mockProcess>.
  let consoleMock: ReturnType;
  let processMock: ReturnType;
  beforeEach(async () => {
    // Create temporary directory
    tempDir = await createTempDir("repomirror-sync-forever-");
    // Setup mocks
    consoleMock = mockConsole();
    processMock = mockProcess(true); // Throw on process.exit by default
    // Mock process.cwd to return our temp directory
    processMock.cwd.mockReturnValue(tempDir);
    // Clear all mocks
    vi.clearAllMocks();
  });
  afterEach(async () => {
    // Cleanup temp directory
    await cleanupTempDir(tempDir);
    // Restore all mocks
    vi.restoreAllMocks();
  });
  describe("successful execution", () => {
    it("should execute ralph.sh successfully when script exists", async () => {
      // Create .repomirror directory and ralph.sh script
      // NOTE(review): script content flattened to one line by extraction.
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": `#!/bin/bash while :; do ./.repomirror/sync.sh echo -e "===SLEEP===\\n===SLEEP===\\n"; echo 'looping'; sleep 10; done`,
        },
      });
      // Mock successful execa execution
      mockExeca.mockResolvedValue({
        stdout: "Continuous sync running",
        stderr: "",
        exitCode: 0,
      });
      // Run syncForever
      await syncForever();
      // Verify execa was called with correct parameters
      expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "ralph.sh")], {
        stdio: "inherit",
        cwd: tempDir,
      });
      // Verify console output
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running ralph.sh (continuous sync)..."));
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Press Ctrl+C to stop"));
      // Verify process.exit was not called (successful execution)
      expect(processMock.exit).not.toHaveBeenCalled();
    });
    it("should use correct working directory and script path", async () => {
      // Create .repomirror directory and ralph.sh script
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'continuous sync'",
        },
      });
      // Mock successful execa execution
      mockExeca.mockResolvedValue({
        stdout: "continuous sync",
        stderr: "",
        exitCode: 0,
      });
      await syncForever();
      const expectedScriptPath = join(tempDir, ".repomirror", "ralph.sh");
      // Verify execa was called with absolute path to ralph.sh
      expect(mockExeca).toHaveBeenCalledWith("bash", [expectedScriptPath], {
        stdio: "inherit",
        cwd: tempDir,
      });
    });
    it("should inherit stdio for continuous output monitoring", async () => {
      // Create .repomirror directory and ralph.sh script
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\nwhile true; do echo 'continuous'; sleep 1; done",
        },
      });
      mockExeca.mockResolvedValue({
        stdout: "continuous",
        stderr: "",
        exitCode: 0,
      });
      await syncForever();
      // Verify stdio: "inherit" was passed to execa for real-time output
      expect(mockExeca).toHaveBeenCalledWith(
        "bash",
        [join(tempDir, ".repomirror", "ralph.sh")],
        expect.objectContaining({
          stdio: "inherit",
        })
      );
    });
  });
  describe("error cases", () => {
    it("should exit with error when .repomirror/ralph.sh does not exist", async () => {
      // Don't create the ralph.sh script
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify error message
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.")
      );
      // Verify process.exit was called with code 1
      expect(processMock.exit).toHaveBeenCalledWith(1);
      // Verify execa was not called
      expect(mockExeca).not.toHaveBeenCalled();
    });
    it("should exit with error when .repomirror directory does not exist", async () => {
      // Don't create the .repomirror directory at all
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify error message
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.")
      );
      // Verify process.exit was called with code 1
      expect(processMock.exit).toHaveBeenCalledWith(1);
      // Verify execa was not called
      expect(mockExeca).not.toHaveBeenCalled();
    });
    it("should handle script execution errors gracefully", async () => {
      // Create .repomirror directory and ralph.sh script
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\nexit 1",
        },
      });
      // Mock failed execa execution
      const scriptError = new Error("Ralph script execution failed");
      (scriptError as any).exitCode = 1;
      mockExeca.mockRejectedValue(scriptError);
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify initial success messages
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running ralph.sh (continuous sync)..."));
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Press Ctrl+C to stop"));
      // Verify error message
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Sync forever failed: Ralph script execution failed")
      );
      // Verify process.exit was called with code 1
      expect(processMock.exit).toHaveBeenCalledWith(1);
    });
    it("should handle non-Error exceptions in script execution", async () => {
      // Create .repomirror directory and ralph.sh script
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'failing'",
        },
      });
      // Mock execa to throw a non-Error object
      mockExeca.mockRejectedValue("String error in ralph");
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify error message handles non-Error exceptions
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Sync forever failed: String error in ralph")
      );
      // Verify process.exit was called with code 1
      expect(processMock.exit).toHaveBeenCalledWith(1);
    });
  });
  describe("SIGINT signal handling", () => {
    it("should handle SIGINT gracefully with user-friendly message", async () => {
      // Create .repomirror directory and ralph.sh script
      // NOTE(review): script content partially flattened by extraction.
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": `#!/bin/bash trap 'exit 0' SIGINT while true; do echo "Running..."
sleep 1 done`,
        },
      });
      // Mock execa to simulate SIGINT (Ctrl+C)
      const sigintError = new Error("Process interrupted");
      (sigintError as any).signal = "SIGINT";
      mockExeca.mockRejectedValue(sigintError);
      // Should not throw or exit - SIGINT is handled gracefully
      await syncForever();
      // Verify user-friendly stop message
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Stopped by user"));
      // Verify process.exit was NOT called (graceful shutdown)
      expect(processMock.exit).not.toHaveBeenCalled();
    });
    it("should distinguish SIGINT from other errors", async () => {
      // Create .repomirror directory and ralph.sh script
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'running'",
        },
      });
      // Mock execa to simulate a non-SIGINT error
      const otherError = new Error("Network error");
      (otherError as any).signal = "SIGTERM";
      mockExeca.mockRejectedValue(otherError);
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Should show error message, not the user-friendly stop message
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Sync forever failed: Network error")
      );
      expect(consoleMock.log).not.toHaveBeenCalledWith(expect.stringContaining("Stopped by user"));
      // Verify process.exit was called with code 1
      expect(processMock.exit).toHaveBeenCalledWith(1);
    });
    it("should handle Error objects with SIGINT signal correctly", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\ntrap 'exit 0' SIGINT; sleep 1000",
        },
      });
      // Create proper Error object with SIGINT signal
      const sigintError = new Error("Command was killed with SIGINT");
      (sigintError as any).signal = "SIGINT";
      mockExeca.mockRejectedValue(sigintError);
      // Should complete without throwing
      await syncForever();
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Stopped by user"));
      expect(processMock.exit).not.toHaveBeenCalled();
    });
    it("should handle non-Error objects with SIGINT signal", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'test'",
        },
      });
      // Mock with non-Error object that has signal property
      const sigintObj = { signal: "SIGINT", message: "Interrupted" };
      mockExeca.mockRejectedValue(sigintObj);
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Non-Error objects should be treated as regular errors
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Sync forever failed:")
      );
      expect(consoleMock.log).not.toHaveBeenCalledWith(expect.stringContaining("Stopped by user"));
    });
  });
  describe("console output verification", () => {
    beforeEach(async () => {
      // Create .repomirror directory and ralph.sh script for all output tests
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": `#!/bin/bash echo "Continuous sync" while true; do sleep 1; done`,
        },
      });
    });
    it("should show cyan colored startup messages", async () => {
      mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 });
      await syncForever();
      // Check that the startup messages were logged
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running ralph.sh (continuous sync)..."));
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Press Ctrl+C to stop"));
    });
    it("should show yellow colored stop message on SIGINT", async () => {
      const sigintError = new Error("Interrupted");
      (sigintError as any).signal = "SIGINT";
      mockExeca.mockRejectedValue(sigintError);
      await syncForever();
      // Check that the stop message was logged in yellow
      expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Stopped by user"));
    });
    it("should show red colored error message on failure", async () => {
      const error = new Error("Command failed");
      mockExeca.mockRejectedValue(error);
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Check that the error message was logged
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Sync forever failed: Command failed")
      );
    });
    it("should show red colored error message when ralph.sh is missing", async () => {
      // Remove the ralph.sh script
      await fs.rm(join(tempDir, ".repomirror", "ralph.sh"));
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Check that the missing file error message was logged
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.")
      );
    });
  });
  describe("file system access patterns", () => {
    it("should use fs.access to check ralph.sh existence", async () => {
      const fsAccessSpy = vi.spyOn(fs, "access");
      // Create the script so access check passes
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'continuous'",
        },
      });
      mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 });
      await syncForever();
      // Verify fs.access was called with the correct path
      expect(fsAccessSpy).toHaveBeenCalledWith(join(tempDir, ".repomirror", "ralph.sh"));
      fsAccessSpy.mockRestore();
    });
    it("should handle permission denied errors on file access", async () => {
      const fsAccessSpy = vi.spyOn(fs, "access");
      fsAccessSpy.mockRejectedValue(new Error("EACCES: permission denied"));
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify the error message still shows file not found (since we catch all access errors)
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.")
      );
      fsAccessSpy.mockRestore();
    });
  });
  describe("continuous execution scenarios", () => {
    it("should handle long-running scripts appropriately", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": `#!/bin/bash while true; do echo "Syncing..."
sleep 5 done`,
        },
      });
      // Mock long-running process that eventually completes
      mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 });
      await syncForever();
      expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "ralph.sh")], {
        stdio: "inherit",
        cwd: tempDir,
      });
    });
    it("should handle scripts with complex loop logic", async () => {
      // NOTE(review): shell script flattened to one line by extraction.
      const complexScript = `#!/bin/bash set -euo pipefail cleanup() { echo "Cleaning up..." exit 0 } trap cleanup SIGINT SIGTERM while :; do echo "Starting sync cycle..." # Run sync if ./.repomirror/sync.sh; then echo "Sync successful" else echo "Sync failed, continuing anyway..." fi echo -e "===SLEEP===\\n===SLEEP===\\n" echo 'Waiting before next cycle...' sleep 10 done`;
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": complexScript,
        },
      });
      mockExeca.mockResolvedValue({ stdout: "Complex script output", stderr: "", exitCode: 0 });
      await syncForever();
      // Should execute the complex script successfully
      expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "ralph.sh")], {
        stdio: "inherit",
        cwd: tempDir,
      });
    });
    it("should handle empty script content", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "",
        },
      });
      mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 });
      await syncForever();
      // Should still execute successfully
      expect(mockExeca).toHaveBeenCalled();
    });
  });
  describe("process and signal handling edge cases", () => {
    it("should preserve working directory context for continuous execution", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\npwd; while true; do sleep 1; done",
        },
      });
      mockExeca.mockResolvedValue({ stdout: tempDir, stderr: "", exitCode: 0 });
      await syncForever();
      // Verify that execa is called with the correct working directory
      expect(mockExeca).toHaveBeenCalledWith(
        "bash",
        [join(tempDir, ".repomirror", "ralph.sh")],
        expect.objectContaining({ cwd: tempDir, })
      );
    });
    it("should handle bash command execution with proper shell for continuous processes", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'continuous execution'; while true; do sleep 1; done",
        },
      });
      mockExeca.mockResolvedValue({ stdout: "continuous execution", stderr: "", exitCode: 0 });
      await syncForever();
      // Verify that bash is used as the shell command
      expect(mockExeca).toHaveBeenCalledWith(
        "bash",
        expect.any(Array),
        expect.any(Object)
      );
    });
    it("should handle multiple different signal types correctly", async () => {
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\nwhile true; do sleep 1; done",
        },
      });
      // Test different signals
      const signals = ["SIGTERM", "SIGKILL", "SIGHUP", "SIGQUIT"];
      for (const signal of signals) {
        mockExeca.mockClear();
        const error = new Error(`Process killed with ${signal}`);
        (error as any).signal = signal;
        mockExeca.mockRejectedValue(error);
        await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
        // Only SIGINT should show the user-friendly message
        expect(consoleMock.error).toHaveBeenCalledWith(
          expect.stringContaining(`Sync forever failed: Process killed with ${signal}`)
        );
      }
    });
  });
  describe("ralph.sh script verification and execution", () => {
    it("should verify ralph.sh exists before attempting execution", async () => {
      // Don't create any files
      const fsAccessSpy = vi.spyOn(fs, "access");
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify fs.access was called to check script existence
      expect(fsAccessSpy).toHaveBeenCalledWith(join(tempDir, ".repomirror", "ralph.sh"));
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found.")
      );
      fsAccessSpy.mockRestore();
    });
    it("should handle gracefully when .repomirror directory is missing", async () => {
      const fsAccessSpy = vi.spyOn(fs, "access");
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Verify the error is caught and handled gracefully
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.")
      );
      expect(processMock.exit).toHaveBeenCalledWith(1);
      expect(mockExeca).not.toHaveBeenCalled();
      fsAccessSpy.mockRestore();
    });
    it("should handle file system permission errors during script verification", async () => {
      const fsAccessSpy = vi.spyOn(fs, "access");
      const permissionError = new Error("EACCES: permission denied");
      (permissionError as any).code = "EACCES";
      fsAccessSpy.mockRejectedValue(permissionError);
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Should treat permission errors as "file not found" for user-friendly message
      expect(consoleMock.error).toHaveBeenCalledWith(
        expect.stringContaining("Error: .repomirror/ralph.sh not found. Run 'npx repomirror init' first.")
      );
      fsAccessSpy.mockRestore();
    });
    it("should only execute ralph.sh after successful verification", async () => {
      const fsAccessSpy = vi.spyOn(fs, "access");
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "ralph.sh": "#!/bin/bash\necho 'verified and running'",
        },
      });
      mockExeca.mockResolvedValue({ stdout: "verified and running", stderr: "", exitCode: 0 });
      await syncForever();
      // Verify fs.access was called first
      expect(fsAccessSpy).toHaveBeenCalledWith(join(tempDir, ".repomirror", "ralph.sh"));
      // Then execa was called
      expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "ralph.sh")], {
        stdio: "inherit",
        cwd: tempDir,
      });
      fsAccessSpy.mockRestore();
    });
    it("should specifically look for ralph.sh not sync.sh", async () => {
      // Create only sync.sh, not ralph.sh
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "sync.sh": "#!/bin/bash\necho 'sync'",
        },
      });
      await expect(syncForever()).rejects.toThrow("Process exit called with code 1");
      // Should
specifically complain about ralph.sh being missing expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/ralph.sh not found.") ); }); it("should execute ralph.sh with appropriate permissions expectations", async () => { // Create ralph.sh with executable permissions await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\necho 'ralph running'", }, }); // Make script executable (simulate proper init) const scriptPath = join(tempDir, ".repomirror", "ralph.sh"); const stats = await fs.stat(scriptPath); await fs.chmod(scriptPath, stats.mode | 0o111); mockExeca.mockResolvedValue({ stdout: "ralph running", stderr: "", exitCode: 0 }); await syncForever(); expect(mockExeca).toHaveBeenCalledWith("bash", [scriptPath], { stdio: "inherit", cwd: tempDir, }); }); it("should work with ralph.sh that contains typical continuous sync logic", async () => { const typicalRalphScript = `#!/bin/bash while :; do ./.repomirror/sync.sh echo -e "===SLEEP===\\n===SLEEP===\\n"; echo 'looping'; sleep 10; done`; await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": typicalRalphScript, }, }); mockExeca.mockResolvedValue({ stdout: "Typical ralph execution", stderr: "", exitCode: 0 }); await syncForever(); expect(mockExeca).toHaveBeenCalledWith( "bash", [join(tempDir, ".repomirror", "ralph.sh")], { stdio: "inherit", cwd: tempDir, } ); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running ralph.sh (continuous sync)...")); }); it("should demonstrate continuous loop verification through script content", async () => { const continuousLoopScript = `#!/bin/bash set -euo pipefail echo "Starting continuous sync loop..." while true; do echo "[$(date)] Running sync cycle" if ./.repomirror/sync.sh; then echo "[$(date)] Sync completed successfully" else echo "[$(date)] Sync failed, will retry in 10 seconds" fi echo -e "===SLEEP===\\n===SLEEP===\\n" echo "Waiting 10 seconds before next cycle..." 
sleep 10 done`; await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": continuousLoopScript, }, }); // Mock long-running process that demonstrates continuous behavior mockExeca.mockResolvedValue({ stdout: "Starting continuous sync loop...\n[date] Running sync cycle", stderr: "", exitCode: 0 }); await syncForever(); // Verify the continuous script is executed properly expect(mockExeca).toHaveBeenCalledWith( "bash", [join(tempDir, ".repomirror", "ralph.sh")], expect.objectContaining({ stdio: "inherit", // This allows the continuous output to be seen }) ); }); it("should verify ralph.sh runs forever through proper execution setup", async () => { const foreverScript = `#!/bin/bash set -euo pipefail # Setup trap for graceful shutdown trap 'echo "Shutting down gracefully..."; exit 0' SIGINT SIGTERM echo "Starting forever loop..." counter=0 while true; do ((counter++)) echo "[Cycle $counter] Running sync..." # Simulate sync work if ./.repomirror/sync.sh; then echo "[Cycle $counter] Sync completed" else echo "[Cycle $counter] Sync failed, continuing..." fi echo -e "===SLEEP===\\n===SLEEP===\\n" echo "Sleeping 10 seconds before next cycle..." 
sleep 10 done`; await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": foreverScript, }, }); // Mock a long-running process that eventually gets interrupted const longRunningOutput = "Starting forever loop...\n[Cycle 1] Running sync...\n[Cycle 1] Sync completed\n===SLEEP===\n===SLEEP===\nSleeping 10 seconds before next cycle..."; mockExeca.mockResolvedValue({ stdout: longRunningOutput, stderr: "", exitCode: 0 }); await syncForever(); // Verify the forever script is properly executed expect(mockExeca).toHaveBeenCalledWith( "bash", [join(tempDir, ".repomirror", "ralph.sh")], expect.objectContaining({ stdio: "inherit", // Essential for monitoring continuous output cwd: tempDir, }) ); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("Running ralph.sh (continuous sync)...") ); expect(consoleMock.log).toHaveBeenCalledWith( expect.stringContaining("Press Ctrl+C to stop") ); }); }); describe("enhanced signal handling and process management", () => { it("should handle SIGTERM gracefully as a shutdown signal", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\ntrap 'exit 0' SIGTERM; while true; do sleep 1; done", }, }); const sigtermError = new Error("Process terminated"); (sigtermError as any).signal = "SIGTERM"; mockExeca.mockRejectedValue(sigtermError); await expect(syncForever()).rejects.toThrow("Process exit called with code 1"); // SIGTERM should be treated as an error, not graceful user stop expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync forever failed: Process terminated") ); expect(consoleMock.log).not.toHaveBeenCalledWith(expect.stringContaining("Stopped by user")); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should properly distinguish SIGINT from other termination signals", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\nwhile true; do sleep 1; done", }, }); // Test SIGINT (user 
interrupt) - should be handled gracefully const sigintError = new Error("User interrupted"); (sigintError as any).signal = "SIGINT"; mockExeca.mockRejectedValue(sigintError); await syncForever(); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Stopped by user")); expect(processMock.exit).not.toHaveBeenCalled(); // Reset mocks and test non-SIGINT signal mockExeca.mockClear(); consoleMock.log.mockClear(); consoleMock.error.mockClear(); const sigkillError = new Error("Process killed"); (sigkillError as any).signal = "SIGKILL"; mockExeca.mockRejectedValue(sigkillError); await expect(syncForever()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync forever failed: Process killed") ); expect(consoleMock.log).not.toHaveBeenCalledWith(expect.stringContaining("Stopped by user")); }); }); describe("shell script execution and process management", () => { it("should execute ralph.sh with bash shell for proper script interpretation", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\nset -euo pipefail\necho 'bash specific features'", }, }); mockExeca.mockResolvedValue({ stdout: "bash specific features", stderr: "", exitCode: 0 }); await syncForever(); // Verify bash is specifically used as the shell expect(mockExeca).toHaveBeenCalledWith( "bash", expect.arrayContaining([join(tempDir, ".repomirror", "ralph.sh")]), expect.any(Object) ); }); it("should pass correct execution context to subprocess", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\necho \"CWD: $PWD\"", }, }); mockExeca.mockResolvedValue({ stdout: `CWD: ${tempDir}`, stderr: "", exitCode: 0 }); await syncForever(); expect(mockExeca).toHaveBeenCalledWith( "bash", [join(tempDir, ".repomirror", "ralph.sh")], expect.objectContaining({ stdio: "inherit", // For real-time output cwd: tempDir, // Correct working directory }) ); }); 
it("should maintain process inheritance for continuous execution monitoring", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": `#!/bin/bash while true; do echo "[$(date)] Continuous execution..." sleep 1 done`, }, }); // Mock a process that produces continuous output mockExeca.mockResolvedValue({ stdout: "[date] Continuous execution...\n[date] Continuous execution...", stderr: "", exitCode: 0 }); await syncForever(); // Verify stdio inheritance allows real-time monitoring expect(mockExeca).toHaveBeenCalledWith( expect.any(String), expect.any(Array), expect.objectContaining({ stdio: "inherit", // Critical for continuous process monitoring }) ); }); it("should handle script execution failures with proper error context", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\nexit 42", }, }); const executionError = new Error("Script execution failed with exit code 42"); (executionError as any).exitCode = 42; (executionError as any).stderr = "Script error output"; mockExeca.mockRejectedValue(executionError); await expect(syncForever()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync forever failed: Script execution failed with exit code 42") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle permission denied during script execution", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "ralph.sh": "#!/bin/bash\necho 'should not run'", }, }); const permissionError = new Error("Permission denied"); (permissionError as any).code = "EACCES"; (permissionError as any).errno = -13; mockExeca.mockRejectedValue(permissionError); await expect(syncForever()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync forever failed: Permission denied") ); }); }); }); 
================================================
FILE: tests/commands/sync-one.test.ts
================================================
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { promises as fs } from "fs";
import { join } from "path";
import {
  createTempDir,
  cleanupTempDir,
  mockConsole,
  mockProcess,
  createMockFileStructure,
} from "../helpers/test-utils";

// Mock the sync function since sync-one is just an alias
const mockSync = vi.fn();
vi.mock("../../src/commands/sync", () => ({
  sync: mockSync,
}));

// Import the module after mocking
const { syncOne } = await import("../../src/commands/sync-one");

/**
 * Test suite for the `sync-one` command.
 *
 * `syncOne` is expected to be a pure alias of `sync`: it delegates 100% of
 * its behavior (success, errors, console output) to the mocked `sync`
 * function and adds no logic of its own. These tests pin that contract.
 */
describe("sync-one command", () => {
  let tempDir: string;
  // FIX: the generic arguments were missing (`ReturnType` alone is invalid
  // TypeScript) — restored from the helper functions these are assigned from.
  let consoleMock: ReturnType<typeof mockConsole>;
  let processMock: ReturnType<typeof mockProcess>;

  beforeEach(async () => {
    // Create temporary directory
    tempDir = await createTempDir("repomirror-sync-one-");
    // Setup mocks
    consoleMock = mockConsole();
    processMock = mockProcess(true);
    // Mock process.cwd to return our temp directory
    processMock.cwd.mockReturnValue(tempDir);
    // Clear all mocks (clears call history only; mockReturnValue above survives)
    vi.clearAllMocks();
  });

  afterEach(async () => {
    // Cleanup temp directory
    await cleanupTempDir(tempDir);
    // Restore all mocks
    vi.restoreAllMocks();
  });

  describe("alias functionality", () => {
    it("should call the sync function when executed", async () => {
      // Mock sync to resolve successfully
      mockSync.mockResolvedValue(undefined);

      await syncOne();

      // Verify that the sync function was called exactly once
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();
    });

    it("should pass through successful execution from sync", async () => {
      // Mock sync to resolve successfully
      mockSync.mockResolvedValue(undefined);

      const result = await syncOne();

      // Verify successful completion (no return value)
      expect(result).toBeUndefined();
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should pass through errors from sync function", async () => {
      // Mock sync to reject with an error
      const syncError = new Error("Sync failed");
      mockSync.mockRejectedValue(syncError);

      // Verify that syncOne propagates the error
      await expect(syncOne()).rejects.toThrow("Sync failed");
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should pass through process exit calls from sync", async () => {
      // Mock sync to throw a process exit error (as our test mock does)
      mockSync.mockRejectedValue(new Error("Process exit called with code 1"));

      await expect(syncOne()).rejects.toThrow("Process exit called with code 1");
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("integration behavior", () => {
    it("should maintain the same interface as sync command", async () => {
      mockSync.mockResolvedValue(undefined);

      // syncOne should be a function that returns a Promise
      const result = syncOne();
      expect(result).toBeInstanceOf(Promise);

      await result;
      expect(mockSync).toHaveBeenCalled();
    });

    it("should handle multiple consecutive calls", async () => {
      mockSync.mockResolvedValue(undefined);

      // Call syncOne multiple times
      await syncOne();
      await syncOne();
      await syncOne();

      // Each call should result in a call to sync
      expect(mockSync).toHaveBeenCalledTimes(3);
    });

    it("should handle async errors properly", async () => {
      // Test different types of errors that sync might throw
      const errors = [
        new Error("File not found"),
        new Error("Permission denied"),
        new Error("Script execution failed"),
      ];

      for (const error of errors) {
        mockSync.mockClear();
        mockSync.mockRejectedValue(error);

        await expect(syncOne()).rejects.toThrow(error.message);
        expect(mockSync).toHaveBeenCalledTimes(1);
      }
    });
  });

  describe("command consistency", () => {
    it("should behave identically to sync command for successful execution", async () => {
      // Create a real sync environment to verify behavior is consistent
      await createMockFileStructure(tempDir, {
        ".repomirror": {
          "sync.sh": `#!/bin/bash
echo "Test sync execution"`,
        },
      });

      mockSync.mockResolvedValue(undefined);

      await syncOne();

      // Verify the same sync function is called with the same parameters
      expect(mockSync).toHaveBeenCalledWith();
    });

    it("should maintain error handling consistency with sync", async () => {
      // Test that sync-one doesn't add any additional error handling
      const syncError = new Error("Custom sync error with specific message");
      mockSync.mockRejectedValue(syncError);

      try {
        await syncOne();
        // Should not reach here
        expect(true).toBe(false);
      } catch (error) {
        // Error should be exactly the same as what sync threw
        expect(error).toBe(syncError);
      }

      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("documentation and clarity", () => {
    it("should be clear that sync-one is an alias for sync", async () => {
      mockSync.mockResolvedValue(undefined);

      // The function name and behavior should make it clear this is an alias
      await syncOne();

      // Should delegate entirely to sync function
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();
    });

    it("should be a true alias with zero functional differences from sync", async () => {
      mockSync.mockResolvedValue(undefined);

      // Verify that syncOne is literally just a wrapper around sync
      // with no additional logic, parameters, or side effects
      await syncOne();

      // Should call sync exactly once with no arguments
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();

      // Should not have any other function calls or side effects
      expect(consoleMock.log).not.toHaveBeenCalled();
      expect(consoleMock.error).not.toHaveBeenCalled();
      expect(consoleMock.warn).not.toHaveBeenCalled();
    });

    it("should not add any additional functionality beyond sync", async () => {
      mockSync.mockResolvedValue(undefined);

      const startTime = Date.now();
      await syncOne();
      const endTime = Date.now();

      // Should complete quickly since it's just a function call
      expect(endTime - startTime).toBeLessThan(100);
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("error propagation", () => {
    it("should propagate file not found errors", async () => {
      const fileError = new Error("Process exit called with code 1");
      mockSync.mockRejectedValue(fileError);

      await expect(syncOne()).rejects.toThrow("Process exit called with code 1");
    });

    it("should propagate script execution errors", async () => {
      const execError = new Error("Script execution failed");
      mockSync.mockRejectedValue(execError);

      await expect(syncOne()).rejects.toThrow("Script execution failed");
    });

    it("should propagate permission errors", async () => {
      const permError = new Error("Permission denied");
      mockSync.mockRejectedValue(permError);

      await expect(syncOne()).rejects.toThrow("Permission denied");
    });

    it("should handle sync function returning promises correctly", async () => {
      // Test that syncOne properly awaits the sync promise
      let syncResolved = false;
      mockSync.mockImplementation(async () => {
        await new Promise(resolve => setTimeout(resolve, 10));
        syncResolved = true;
      });

      await syncOne();

      expect(syncResolved).toBe(true);
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("type safety and interface", () => {
    it("should have the same return type as sync", async () => {
      mockSync.mockResolvedValue(undefined);

      const result = await syncOne();

      // Both sync and syncOne should return Promise
      expect(result).toBeUndefined();
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should accept no parameters like sync", async () => {
      mockSync.mockResolvedValue(undefined);

      // syncOne should not accept any parameters
      await syncOne();

      // Verify sync was called with no parameters
      expect(mockSync).toHaveBeenCalledWith();
    });

    it("should be usable in the same contexts as sync", async () => {
      mockSync.mockResolvedValue(undefined);

      // Should be able to use syncOne anywhere sync can be used
      const commands = [syncOne];
      for (const command of commands) {
        await command();
      }

      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("performance and efficiency", () => {
    it("should add minimal overhead over sync", async () => {
      mockSync.mockResolvedValue(undefined);

      const startTime = process.hrtime.bigint();
      await syncOne();
      const endTime = process.hrtime.bigint();

      // Should complete very quickly as it's just a function call
      const durationMs = Number(endTime - startTime) / 1_000_000;
      expect(durationMs).toBeLessThan(50); // Less than 50ms overhead
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should not create unnecessary promises or async overhead", async () => {
      let syncCallCount = 0;
      mockSync.mockImplementation(async () => {
        syncCallCount++;
        return undefined;
      });

      await syncOne();

      // Should result in exactly one call to sync
      expect(syncCallCount).toBe(1);
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("console output verification", () => {
    it("should not produce any console output directly (all output from sync)", async () => {
      mockSync.mockResolvedValue(undefined);

      await syncOne();

      // syncOne should not produce any console output itself
      // All output should come from the sync function it calls
      expect(consoleMock.log).not.toHaveBeenCalled();
      expect(consoleMock.error).not.toHaveBeenCalled();
      expect(consoleMock.warn).not.toHaveBeenCalled();
      expect(consoleMock.info).not.toHaveBeenCalled();
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should let sync handle all console output on success", async () => {
      // Mock sync to produce some console output
      mockSync.mockImplementation(async () => {
        console.log("Running sync.sh...");
        console.log("Sync completed successfully");
        return undefined;
      });

      await syncOne();

      // Verify sync was called and produced output
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(consoleMock.log).toHaveBeenCalledWith("Running sync.sh...");
      expect(consoleMock.log).toHaveBeenCalledWith("Sync completed successfully");
    });

    it("should let sync handle all console output on error", async () => {
      // Mock sync to produce error output before throwing
      mockSync.mockImplementation(async () => {
        console.error("Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.");
        throw new Error("Process exit called with code 1");
      });

      await expect(syncOne()).rejects.toThrow("Process exit called with code 1");

      // Verify sync was called and produced error output
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(consoleMock.error).toHaveBeenCalledWith("Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.");
    });

    it("should preserve the exact console output from sync", async () => {
      const testMessages = [
        "Running sync.sh...",
        "Processing files...",
        "Sync completed successfully"
      ];

      mockSync.mockImplementation(async () => {
        testMessages.forEach(msg => console.log(msg));
        return undefined;
      });

      await syncOne();

      // Verify all messages were logged in the correct order
      expect(mockSync).toHaveBeenCalledTimes(1);
      testMessages.forEach(msg => {
        expect(consoleMock.log).toHaveBeenCalledWith(msg);
      });
      expect(consoleMock.log).toHaveBeenCalledTimes(testMessages.length);
    });
  });

  describe("argument passing verification", () => {
    it("should pass no arguments to sync (both take zero parameters)", async () => {
      mockSync.mockResolvedValue(undefined);

      // Call syncOne with no arguments (as it should be called)
      await syncOne();

      // Verify sync was called with exactly zero arguments
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();
      expect(mockSync).toHaveBeenCalledWith(...[]); // Explicitly verify no args
    });

    it("should handle the fact that neither command accepts parameters", async () => {
      mockSync.mockResolvedValue(undefined);

      // syncOne doesn't accept parameters, just like sync
      const result = await syncOne();

      // Verify the call signature matches sync exactly
      expect(result).toBeUndefined();
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();
    });

    it("should maintain parameter consistency with sync command", async () => {
      mockSync.mockResolvedValue(undefined);

      // Both commands should have identical function signatures
      // syncOne: () => Promise<void>
      // sync: () => Promise<void>

      // Test that syncOne behaves exactly like sync would
      await syncOne();
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();

      // Clear mocks and test direct sync call for comparison
      mockSync.mockClear();
      await mockSync();

      // Both calls should be identical
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();
    });
  });

  describe("delegation verification", () => {
    it("should delegate 100% of functionality to sync", async () => {
      mockSync.mockResolvedValue(undefined);

      await syncOne();

      // syncOne should do nothing except call sync
      expect(mockSync).toHaveBeenCalledTimes(1);
      expect(mockSync).toHaveBeenCalledWith();

      // No other system calls should be made by syncOne itself
      expect(processMock.exit).not.toHaveBeenCalled();
      expect(consoleMock.log).not.toHaveBeenCalled();
      expect(consoleMock.error).not.toHaveBeenCalled();
    });

    it("should delegate error handling entirely to sync", async () => {
      const customError = new Error("Custom delegation test error");
      mockSync.mockRejectedValue(customError);

      try {
        await syncOne();
        expect(true).toBe(false); // Should not reach here
      } catch (error) {
        // Error should be the exact same object from sync
        expect(error).toBe(customError);
        expect(error.message).toBe("Custom delegation test error");
      }

      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should delegate success handling entirely to sync", async () => {
      const customReturnValue = undefined; // sync returns Promise<void>
      mockSync.mockResolvedValue(customReturnValue);

      const result = await syncOne();

      // Result should be exactly what sync returned
      expect(result).toBe(customReturnValue);
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should delegate all async behavior to sync", async () => {
      let syncStarted = false;
      let syncCompleted = false;

      mockSync.mockImplementation(async () => {
        syncStarted = true;
        await new Promise(resolve => setTimeout(resolve, 50));
        syncCompleted = true;
        return undefined;
      });

      // Before calling syncOne, sync should not have started
      expect(syncStarted).toBe(false);
      expect(syncCompleted).toBe(false);

      const promise = syncOne();

      // After calling syncOne but before awaiting, sync should have started
      expect(syncStarted).toBe(true);
      expect(syncCompleted).toBe(false);

      await promise;

      // After awaiting, sync should be completed
      expect(syncCompleted).toBe(true);
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("comprehensive error propagation", () => {
    it("should propagate all error types without modification", async () => {
      const errorTypes = [
        new Error("Standard error"),
        new TypeError("Type error"),
        new ReferenceError("Reference error"),
        new SyntaxError("Syntax error"),
        { name: "CustomError", message: "Custom error object" },
        "String error",
        42, // Number error
        null,
        undefined
      ];

      for (const error of errorTypes) {
        mockSync.mockClear();
        mockSync.mockRejectedValue(error);

        try {
          await syncOne();
          expect(true).toBe(false); // Should not reach here
        } catch (caughtError) {
          // Error should be exactly the same object/value
          expect(caughtError).toBe(error);
        }

        expect(mockSync).toHaveBeenCalledTimes(1);
      }
    });

    it("should preserve error stack traces", async () => {
      const errorWithStack = new Error("Error with stack trace");
      const originalStack = errorWithStack.stack;
      mockSync.mockRejectedValue(errorWithStack);

      try {
        await syncOne();
        expect(true).toBe(false); // Should not reach here
      } catch (error) {
        // Stack trace should be preserved
        expect(error.stack).toBe(originalStack);
      }

      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should handle promise rejection timing correctly", async () => {
      let rejectionHandled = false;

      mockSync.mockImplementation(async () => {
        await new Promise(resolve => setTimeout(resolve, 10));
        throw new Error("Delayed rejection");
      });

      try {
        await syncOne();
        expect(true).toBe(false); // Should not reach here
      } catch (error) {
        rejectionHandled = true;
        expect(error.message).toBe("Delayed rejection");
      }

      expect(rejectionHandled).toBe(true);
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });

  describe("edge cases and robustness", () => {
    it("should handle sync returning resolved promises correctly", async () => {
      // Test with already resolved promise
      mockSync.mockReturnValue(Promise.resolve(undefined));

      const result = await syncOne();

      expect(result).toBeUndefined();
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should handle sync returning rejected promises correctly", async () => {
      // Test with already rejected promise
      const error = new Error("Pre-rejected promise");
      mockSync.mockReturnValue(Promise.reject(error));

      await expect(syncOne()).rejects.toThrow("Pre-rejected promise");
      expect(mockSync).toHaveBeenCalledTimes(1);
    });

    it("should work correctly when called in quick succession", async () => {
      mockSync.mockResolvedValue(undefined);

      // Fire off multiple calls simultaneously
      const promises = [
        syncOne(),
        syncOne(),
        syncOne()
      ];

      await Promise.all(promises);

      // Each call should result in a separate call to sync
      expect(mockSync).toHaveBeenCalledTimes(3);
    });

    it("should handle mixed success and failure scenarios", async () => {
      // First call succeeds
      mockSync.mockResolvedValue(undefined);
      await syncOne();
      expect(mockSync).toHaveBeenCalledTimes(1);

      // Second call fails
      mockSync.mockClear();
      mockSync.mockRejectedValue(new Error("Second call failed"));
      await expect(syncOne()).rejects.toThrow("Second call failed");
      expect(mockSync).toHaveBeenCalledTimes(1);

      // Third call succeeds again
      mockSync.mockClear();
      mockSync.mockResolvedValue(undefined);
      await syncOne();
      expect(mockSync).toHaveBeenCalledTimes(1);
    });
  });
});

================================================
FILE: tests/commands/sync.test.ts
================================================
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { promises as fs } from "fs";
import { join } from "path";
import {
  createTempDir,
  cleanupTempDir,
  mockConsole,
  mockProcess,
  createMockFileStructure,
} from "../helpers/test-utils";
// Mock external dependencies at module level const mockExeca = vi.fn(); vi.mock("execa", () => ({ execa: mockExeca, })); // Import the module after mocking const { sync } = await import("../../src/commands/sync"); describe("sync command", () => { let tempDir: string; let consoleMock: ReturnType; let processMock: ReturnType; beforeEach(async () => { // Create temporary directory tempDir = await createTempDir("repomirror-sync-"); // Setup mocks consoleMock = mockConsole(); processMock = mockProcess(true); // Throw on process.exit by default // Mock process.cwd to return our temp directory processMock.cwd.mockReturnValue(tempDir); // Clear all mocks vi.clearAllMocks(); }); afterEach(async () => { // Cleanup temp directory await cleanupTempDir(tempDir); // Restore all mocks vi.restoreAllMocks(); }); describe("successful execution", () => { it("should execute sync.sh successfully when script exists", async () => { // Create .repomirror directory and sync.sh script await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": `#!/bin/bash cat .repomirror/prompt.md | \\ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir ../target | \\ tee -a .repomirror/claude_output.jsonl | \\ npx repomirror visualize --debug;`, }, }); // Mock successful execa execution mockExeca.mockResolvedValue({ stdout: "Sync completed", stderr: "", exitCode: 0, }); // Run sync await sync(); // Verify execa was called with correct parameters expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: tempDir, }); // Verify console output expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running sync.sh...")); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); // Verify process.exit was not called expect(processMock.exit).not.toHaveBeenCalled(); }); it("should use correct working directory and script path", async () => { // Create 
.repomirror directory and sync.sh script await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'test sync'", }, }); // Mock successful execa execution mockExeca.mockResolvedValue({ stdout: "test sync", stderr: "", exitCode: 0, }); await sync(); const expectedScriptPath = join(tempDir, ".repomirror", "sync.sh"); // Verify execa was called with absolute path to sync.sh expect(mockExeca).toHaveBeenCalledWith("bash", [expectedScriptPath], { stdio: "inherit", cwd: tempDir, }); }); it("should inherit stdio for interactive output", async () => { // Create .repomirror directory and sync.sh script await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'interactive output'", }, }); mockExeca.mockResolvedValue({ stdout: "interactive output", stderr: "", exitCode: 0, }); await sync(); // Verify stdio: "inherit" was passed to execa expect(mockExeca).toHaveBeenCalledWith( "bash", [join(tempDir, ".repomirror", "sync.sh")], expect.objectContaining({ stdio: "inherit", }) ); }); }); describe("error cases", () => { it("should exit with error when .repomirror/sync.sh does not exist", async () => { // Don't create the sync.sh script await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Verify error message expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.") ); // Verify process.exit was called with code 1 expect(processMock.exit).toHaveBeenCalledWith(1); // Verify execa was not called expect(mockExeca).not.toHaveBeenCalled(); }); it("should exit with error when .repomirror directory does not exist", async () => { // Don't create the .repomirror directory at all await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Verify error message expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. 
Run 'npx repomirror init' first.") ); // Verify process.exit was called with code 1 expect(processMock.exit).toHaveBeenCalledWith(1); // Verify execa was not called expect(mockExeca).not.toHaveBeenCalled(); }); it("should handle script execution errors gracefully", async () => { // Create .repomirror directory and sync.sh script await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\nexit 1", }, }); // Mock failed execa execution const scriptError = new Error("Script execution failed"); (scriptError as any).exitCode = 1; mockExeca.mockRejectedValue(scriptError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Verify initial success message expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running sync.sh...")); // Verify error message expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: Script execution failed") ); // Verify process.exit was called with code 1 expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle non-Error exceptions in script execution", async () => { // Create .repomirror directory and sync.sh script await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'failing'", }, }); // Mock execa to throw a non-Error object mockExeca.mockRejectedValue("String error"); await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Verify error message handles non-Error exceptions expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: String error") ); // Verify process.exit was called with code 1 expect(processMock.exit).toHaveBeenCalledWith(1); }); }); describe("console output verification", () => { beforeEach(async () => { // Create .repomirror directory and sync.sh script for all output tests await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'sync output'", }, }); }); it("should show cyan colored 'Running sync.sh...' 
message", async () => { mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 }); await sync(); // Check that the running message was logged expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running sync.sh...")); }); it("should show green colored success message on completion", async () => { mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 }); await sync(); // Check that the success message was logged expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); }); it("should show red colored error message on failure", async () => { const error = new Error("Command failed"); mockExeca.mockRejectedValue(error); await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Check that the error message was logged expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: Command failed") ); }); it("should show red colored error message when sync.sh is missing", async () => { // Remove the sync.sh script await fs.rm(join(tempDir, ".repomirror", "sync.sh")); await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Check that the missing file error message was logged expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. 
Run 'npx repomirror init' first.") ); }); }); describe("file system access patterns", () => { it("should use fs.access to check file existence", async () => { const fsAccessSpy = vi.spyOn(fs, "access"); // Create the script so access check passes await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'test'", }, }); mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 }); await sync(); // Verify fs.access was called with the correct path expect(fsAccessSpy).toHaveBeenCalledWith(join(tempDir, ".repomirror", "sync.sh")); fsAccessSpy.mockRestore(); }); it("should handle permission denied errors on file access", async () => { const fsAccessSpy = vi.spyOn(fs, "access"); fsAccessSpy.mockRejectedValue(new Error("EACCES: permission denied")); await expect(sync()).rejects.toThrow("Process exit called with code 1"); // Verify the error message still shows file not found (since we catch all access errors) expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. 
Run 'npx repomirror init' first.") ); fsAccessSpy.mockRestore(); }); }); describe("shell script execution with different exit codes", () => { it("should handle script that exits with code 2", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\nexit 2", }, }); // Mock execa to reject with exit code 2 const scriptError = new Error("Command failed with exit code 2"); (scriptError as any).exitCode = 2; (scriptError as any).stderr = "Error: Invalid argument"; mockExeca.mockRejectedValue(scriptError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running sync.sh...")); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: Command failed with exit code 2") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle script that exits with code 127 (command not found)", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\nnonexistent_command", }, }); const scriptError = new Error("Command failed: nonexistent_command: command not found"); (scriptError as any).exitCode = 127; mockExeca.mockRejectedValue(scriptError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: Command failed: nonexistent_command: command not found") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle script that succeeds with non-zero but success exit code", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'success with warnings'\nexit 0", }, }); mockExeca.mockResolvedValue({ stdout: "success with warnings", stderr: "warning: deprecated option used", exitCode: 0 }); await sync(); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Running sync.sh...")); 
expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); expect(processMock.exit).not.toHaveBeenCalled(); }); }); describe("stdout and stderr capture handling", () => { it("should handle scripts that output to stdout", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'Processing files...'\necho 'Sync complete'", }, }); // Note: With stdio: 'inherit', stdout/stderr go directly to console, not captured mockExeca.mockResolvedValue({ stdout: "Processing files...\nSync complete", stderr: "", exitCode: 0 }); await sync(); // Verify execa was called with stdio: 'inherit' which means output goes directly to console expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: tempDir, }); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); }); it("should handle scripts that output to stderr", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'warning message' >&2\nexit 0", }, }); mockExeca.mockResolvedValue({ stdout: "", stderr: "warning message", exitCode: 0 }); await sync(); expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: tempDir, }); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); }); it("should handle scripts with mixed stdout and stderr output", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'Starting sync'\necho 'warning' >&2\necho 'Finished'", }, }); mockExeca.mockResolvedValue({ stdout: "Starting sync\nFinished", stderr: "warning", exitCode: 0 }); await sync(); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); expect(processMock.exit).not.toHaveBeenCalled(); }); }); describe("permission and 
execution error handling", () => { it("should handle permission denied when executing script", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'test'", }, }); // Mock execa to reject with permission denied error const permissionError = new Error("Permission denied"); (permissionError as any).code = "EACCES"; mockExeca.mockRejectedValue(permissionError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: Permission denied") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle bash command not found error", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'test'", }, }); const commandError = new Error("bash: command not found"); (commandError as any).code = "ENOENT"; mockExeca.mockRejectedValue(commandError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: bash: command not found") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle file system errors during script execution", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'test'", }, }); const fsError = new Error("EIO: i/o error, read"); (fsError as any).code = "EIO"; mockExeca.mockRejectedValue(fsError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Sync failed: EIO: i/o error, read") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); }); describe("working directory context preservation", () => { it("should maintain current working directory after sync execution", async () => { const originalCwd = tempDir; processMock.cwd.mockReturnValue(originalCwd); await createMockFileStructure(tempDir, { 
".repomirror": { "sync.sh": "#!/bin/bash\ncd / && echo 'changed directory'", }, }); mockExeca.mockResolvedValue({ stdout: "changed directory", stderr: "", exitCode: 0 }); await sync(); // Verify that the working directory is preserved in the execa call expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: originalCwd, }); // The process.cwd() should still return the original directory expect(processMock.cwd).toHaveBeenCalled(); }); it("should handle scripts that change directory internally", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": `#!/bin/bash cd subdir echo "Working in $(pwd)" cd .. echo "Back to $(pwd)"`, }, "subdir": {}, }); mockExeca.mockResolvedValue({ stdout: `Working in ${tempDir}/subdir\nBack to ${tempDir}`, stderr: "", exitCode: 0 }); await sync(); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: tempDir, }); }); it("should work correctly when invoked from different working directories", async () => { // Create nested directory structure const nestedDir = join(tempDir, "nested", "deep"); await fs.mkdir(nestedDir, { recursive: true }); // Create script in the nested directory await createMockFileStructure(nestedDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'nested sync'", }, }); // Mock cwd to return the nested directory processMock.cwd.mockReturnValue(nestedDir); mockExeca.mockResolvedValue({ stdout: "nested sync", stderr: "", exitCode: 0 }); await sync(); // Should use the nested directory path expect(mockExeca).toHaveBeenCalledWith( "bash", [join(nestedDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: nestedDir, } ); }); }); describe("script verification and existence checks", () => { it("should verify script exists before execution using fs.access", async () => { const 
fsAccessSpy = vi.spyOn(fs, "access"); await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'test'", }, }); mockExeca.mockResolvedValue({ stdout: "test", stderr: "", exitCode: 0 }); await sync(); // Verify fs.access was called to check file existence expect(fsAccessSpy).toHaveBeenCalledWith(join(tempDir, ".repomirror", "sync.sh")); fsAccessSpy.mockRestore(); }); it("should handle fs.access throwing ENOENT error", async () => { const fsAccessSpy = vi.spyOn(fs, "access"); const enoentError = new Error("ENOENT: no such file or directory"); (enoentError as any).code = "ENOENT"; fsAccessSpy.mockRejectedValue(enoentError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.") ); expect(mockExeca).not.toHaveBeenCalled(); fsAccessSpy.mockRestore(); }); it("should handle fs.access throwing ENOTDIR error", async () => { // Create a file where the .repomirror directory should be await fs.writeFile(join(tempDir, ".repomirror"), "not a directory"); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. Run 'npx repomirror init' first.") ); expect(mockExeca).not.toHaveBeenCalled(); }); it("should handle script that exists but is not readable", async () => { const fsAccessSpy = vi.spyOn(fs, "access"); const permissionError = new Error("EACCES: permission denied"); (permissionError as any).code = "EACCES"; fsAccessSpy.mockRejectedValue(permissionError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( expect.stringContaining("Error: .repomirror/sync.sh not found. 
Run 'npx repomirror init' first.") ); expect(mockExeca).not.toHaveBeenCalled(); fsAccessSpy.mockRestore(); }); }); describe("edge cases", () => { it("should handle empty script content", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "", }, }); mockExeca.mockResolvedValue({ stdout: "", stderr: "", exitCode: 0 }); await sync(); // Should still execute successfully expect(mockExeca).toHaveBeenCalled(); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); }); it("should handle script with complex bash syntax", async () => { const complexScript = `#!/bin/bash set -euo pipefail # Complex sync script with pipes and redirects cat .repomirror/prompt.md | \\ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir ../target | \\ tee -a .repomirror/claude_output.jsonl | \\ npx repomirror visualize --debug if [ $? -eq 0 ]; then echo "Sync successful" else echo "Sync failed" >&2 exit 1 fi`; await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": complexScript, }, }); mockExeca.mockResolvedValue({ stdout: "Sync successful", stderr: "", exitCode: 0 }); await sync(); // Should execute the complex script successfully expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: tempDir, }); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); }); it("should handle scripts in different working directories", async () => { const subdirPath = join(tempDir, "subdir"); await fs.mkdir(subdirPath, { recursive: true }); // Mock cwd to return subdirectory processMock.cwd.mockReturnValue(subdirPath); // Create script in subdirectory's .repomirror folder await createMockFileStructure(subdirPath, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'subdir sync'", }, }); mockExeca.mockResolvedValue({ stdout: "subdir sync", stderr: "", exitCode: 0 }); await sync(); // 
Should use the subdirectory as working directory expect(mockExeca).toHaveBeenCalledWith( "bash", [join(subdirPath, ".repomirror", "sync.sh")], expect.objectContaining({ cwd: subdirPath, }) ); }); it("should handle very large script files", async () => { // Create a large script with many lines const largeScript = [ "#!/bin/bash", "set -e", ...Array(1000).fill(0).map((_, i) => `echo "Line ${i}"`), "echo 'Large script completed'" ].join("\n"); await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": largeScript, }, }); mockExeca.mockResolvedValue({ stdout: "Large script completed", stderr: "", exitCode: 0 }); await sync(); expect(mockExeca).toHaveBeenCalledWith("bash", [join(tempDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: tempDir, }); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); }); it("should handle scripts with special characters in path", async () => { // Test script execution when the path contains special characters const specialDir = join(tempDir, "dir with spaces"); await fs.mkdir(specialDir, { recursive: true }); processMock.cwd.mockReturnValue(specialDir); await createMockFileStructure(specialDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'special path sync'", }, }); mockExeca.mockResolvedValue({ stdout: "special path sync", stderr: "", exitCode: 0 }); await sync(); expect(mockExeca).toHaveBeenCalledWith( "bash", [join(specialDir, ".repomirror", "sync.sh")], { stdio: "inherit", cwd: specialDir, } ); }); it("should handle script execution timeout scenarios", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\nsleep 300", }, }); // Mock a timeout error const timeoutError = new Error("Command timed out after 30000 milliseconds"); (timeoutError as any).timedOut = true; mockExeca.mockRejectedValue(timeoutError); await expect(sync()).rejects.toThrow("Process exit called with code 1"); expect(consoleMock.error).toHaveBeenCalledWith( 
expect.stringContaining("Sync failed: Command timed out after 30000 milliseconds") ); expect(processMock.exit).toHaveBeenCalledWith(1); }); it("should handle scripts with binary output", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\nprintf '\\x00\\x01\\x02\\x03'", }, }); // Mock binary output mockExeca.mockResolvedValue({ stdout: Buffer.from([0, 1, 2, 3]).toString(), stderr: "", exitCode: 0 }); await sync(); expect(consoleMock.log).toHaveBeenCalledWith(expect.stringContaining("Sync completed successfully")); expect(processMock.exit).not.toHaveBeenCalled(); }); }); describe("process and signal handling", () => { it("should preserve working directory context", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\npwd", }, }); mockExeca.mockResolvedValue({ stdout: tempDir, stderr: "", exitCode: 0 }); await sync(); // Verify that execa is called with the correct working directory expect(mockExeca).toHaveBeenCalledWith( "bash", [join(tempDir, ".repomirror", "sync.sh")], expect.objectContaining({ cwd: tempDir, }) ); }); it("should handle bash command execution with proper shell", async () => { await createMockFileStructure(tempDir, { ".repomirror": { "sync.sh": "#!/bin/bash\necho 'bash execution'", }, }); mockExeca.mockResolvedValue({ stdout: "bash execution", stderr: "", exitCode: 0 }); await sync(); // Verify that bash is used as the shell command expect(mockExeca).toHaveBeenCalledWith( "bash", expect.any(Array), expect.any(Object) ); }); }); }); ================================================ FILE: tests/commands/visualize.test.ts ================================================ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { createInterface } from "node:readline"; // Mock process.stdout.write const mockStdoutWrite = vi.fn(); vi.mock("node:readline", () => ({ createInterface: vi.fn(), })); // Import the module after mocking const { 
visualize } = await import("../../src/commands/visualize"); describe("visualize command", () => { let mockReadlineInterface: any; let originalProcessArgv: string[]; let stdoutWriteSpy: any; beforeEach(() => { // Save original values originalProcessArgv = [...process.argv]; // Mock readline interface mockReadlineInterface = { on: vi.fn(), }; vi.mocked(createInterface).mockReturnValue(mockReadlineInterface); // Mock process.stdout.write stdoutWriteSpy = vi.spyOn(process.stdout, 'write').mockImplementation(mockStdoutWrite); // Clear all mocks vi.clearAllMocks(); }); afterEach(() => { // Restore original values process.argv = originalProcessArgv; vi.restoreAllMocks(); }); describe("initialization", () => { it("should create readline interface correctly", () => { visualize(); expect(createInterface).toHaveBeenCalledWith({ input: process.stdin, crlfDelay: Infinity, }); }); it("should setup event listeners on readline interface", () => { visualize(); expect(mockReadlineInterface.on).toHaveBeenCalledWith("line", expect.any(Function)); expect(mockReadlineInterface.on).toHaveBeenCalledWith("close", expect.any(Function)); }); }); describe("debug mode detection", () => { it("should enable debug mode when --debug is in process.argv", () => { process.argv = ["node", "script.js", "--debug"]; visualize(); // Get the line handler from the mock call const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; // Test with a valid JSON line const testJson = { type: "system", message: "test" }; lineHandler(JSON.stringify(testJson)); // Should include timestamp when debug mode is enabled expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("[") ); }); it("should enable debug mode when options.debug is true", () => { visualize({ debug: true }); const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; const testJson = { type: "system", message: "test" }; lineHandler(JSON.stringify(testJson)); 
expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("[") ); }); it("should not include timestamp in normal mode", () => { process.argv = ["node", "script.js"]; visualize(); const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; const testJson = { type: "system", message: "test" }; lineHandler(JSON.stringify(testJson)); // Should not include timestamp bracket const calls = mockStdoutWrite.mock.calls; const hasTimestamp = calls.some(call => typeof call[0] === 'string' && call[0].includes('[2') // ISO timestamp starts with year ); expect(hasTimestamp).toBe(false); }); }); describe("parsing different message types", () => { let lineHandler: Function; beforeEach(() => { visualize(); lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; }); it("should handle system messages", () => { const systemMessage = { type: "system", subtype: "init", message: "System initialization" }; lineHandler(JSON.stringify(systemMessage)); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("System") ); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("init") ); }); it("should handle user messages with text content", () => { const userMessage = { type: "user", message: { content: [{ text: "This is a user message with some content that should be truncated if too long" }] } }; lineHandler(JSON.stringify(userMessage)); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("User") ); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("This is a user message") ); }); it("should handle assistant messages", () => { const assistantMessage = { type: "assistant", message: { content: [{ type: "text", text: "This is an assistant response\nwith multiple lines\nof content\nthat should be truncated" }] } }; lineHandler(JSON.stringify(assistantMessage)); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("Assistant") ); 
expect(mockStdoutWrite).toHaveBeenCalledWith(
      expect.stringContaining("This is an assistant response")
    );
  });

  it("should handle result messages", () => {
    const resultMessage = { type: "result", result: "Final result content goes here" };
    lineHandler(JSON.stringify(resultMessage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(
      expect.stringContaining("=== Final Result ===")
    );
    expect(mockStdoutWrite).toHaveBeenCalledWith(
      expect.stringContaining("Final result content goes here")
    );
  });
});

// Verifies rendering of assistant tool_use blocks: tool name, primary
// argument, extra parameters, and the "Waiting for result..." placeholder,
// plus buffering of tool_result messages until their call arrives.
describe("tool call handling", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    // Grab the "line" event callback registered on the mocked readline interface.
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should handle tool calls with Read tool", () => {
    const toolCall = {
      type: "assistant",
      message: {
        content: [{ id: "tool_123", name: "Read", input: { file_path: "/path/to/file.ts" } }]
      }
    };
    lineHandler(JSON.stringify(toolCall));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Read"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("/path/to/file.ts"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Waiting for result..."));
  });

  it("should handle tool calls with Bash tool", () => {
    const toolCall = {
      type: "assistant",
      message: {
        content: [{ id: "tool_456", name: "Bash", input: { command: "ls -la", cwd: "/home/user", timeout: 5000 } }]
      }
    };
    lineHandler(JSON.stringify(toolCall));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Bash"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("ls -la"));
    // Secondary parameters are rendered as labelled details.
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("cwd: /home/user"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("timeout: 5000ms"));
  });

  it("should handle tool calls with Edit tool", () => {
    const toolCall = {
      type: "assistant",
      message: {
        content: [{
          id: "tool_789",
          name: "Edit",
          input: {
            file_path: "/path/to/file.ts",
            old_string: "function oldImplementation() {",
            new_string: "function newImplementation() {",
            limit: 100,
            offset: 50
          }
        }]
      }
    };
    lineHandler(JSON.stringify(toolCall));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Edit"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("/path/to/file.ts"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("replace:"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("limit: 100"));
  });

  it("should handle tool results", () => {
    const toolResult = {
      type: "user",
      message: {
        content: [{
          type: "tool_result",
          tool_use_id: "tool_123",
          content: "File content line 1\nFile content line 2\nFile content line 3\nMore content...",
          is_error: false
        }]
      }
    };
    lineHandler(JSON.stringify(toolResult));
    // When a tool result comes in without a matching tool call,
    // it gets stored in pendingResults and doesn't output anything immediately
    // This is correct behavior - it's waiting for the tool call
    expect(mockStdoutWrite).not.toHaveBeenCalled();
  });

  it("should handle error tool results without matching tool calls", () => {
    const errorResult = {
      type: "user",
      message: {
        content: [{ type: "tool_result", tool_use_id: "tool_456", content: "Error: File not found", is_error: true }]
      }
    };
    lineHandler(JSON.stringify(errorResult));
    // Same behavior - error results are also stored and don't output immediately
    expect(mockStdoutWrite).not.toHaveBeenCalled();
  });

  it("should display tool results when matched with tool calls", () => {
    // First send a tool call
    const toolCall = {
      type: "assistant",
      message: {
        content: [{ id: "tool_123", name: "Read", input: { file_path: "/test/file.ts" } }]
      }
    };
    lineHandler(JSON.stringify(toolCall));
    mockStdoutWrite.mockClear(); // Clear the tool call output

    // Then send the result - this should display them together
    const toolResult = {
      type: "user",
      message: {
        content: [{
          type: "tool_result",
          tool_use_id: "tool_123",
          content: "File content line 1\nFile content line 2\nFile content line 3\nMore content...",
          is_error: false
        }]
      }
    };
    lineHandler(JSON.stringify(toolResult));

    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Tool Result");
    expect(outputStrings).toContain("4 lines");
    expect(outputStrings).toContain("File content line 1");
  });
});

// Verifies the special 📋 rendering of TodoWrite tool calls, including
// per-status icons, the "← ACTIVE" marker, and the completion percentage.
describe("todo list handling", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should format TodoWrite tool calls specially", () => {
    const todoMessage = {
      type: "assistant",
      message: {
        content: [{
          name: "TodoWrite",
          input: {
            todos: [
              { status: "completed", content: "Read the source file", priority: "high" },
              { status: "in_progress", content: "Parse the JSON data", priority: "medium" },
              { status: "pending", content: "Write tests for edge cases", priority: "low" }
            ]
          }
        }]
      }
    };
    lineHandler(JSON.stringify(todoMessage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("📋"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Todo List Update"));
    // One icon per status: completed / in_progress / pending.
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("✅"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("🔄"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("⏸️"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("← ACTIVE"));
    // 1 of 3 todos completed → 33% done.
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("33% done"));
  });

  it("should handle empty todo lists", () => {
    const emptyTodoMessage = {
      type: "assistant",
      message: { content: [{ name: "TodoWrite", input: { todos: [] } }] }
    };
    lineHandler(JSON.stringify(emptyTodoMessage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Todo List Update"));
  });
});

// Verifies that tool_use and tool_result messages are paired by id and
// displayed together, regardless of arrival order.
describe("tool call and result pairing", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should pair tool calls with their results when call comes first", () => {
    // Send tool call first
    const toolCall = {
      type: "assistant",
      message: { content: [{ id: "tool_123", name: "Read", input: { file_path: "/test/file.ts" } }] }
    };
    lineHandler(JSON.stringify(toolCall));

    // Then send result
    const toolResult = {
      type: "user",
      message: { content: [{ type: "tool_result", tool_use_id: "tool_123", content: "File contents here", is_error: false }] }
    };
    // Clear previous calls
    mockStdoutWrite.mockClear();
    lineHandler(JSON.stringify(toolResult));

    // Should display them together now
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Read"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("✅"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Tool Result"));
  });

  it("should pair tool calls with their results when result comes first", () => {
    // Send result first
    const toolResult = {
      type: "user",
      message: { content: [{ type: "tool_result", tool_use_id: "tool_456", content: "Result content", is_error: false }] }
    };
    lineHandler(JSON.stringify(toolResult));

    // Then send call
    const toolCall = {
      type: "assistant",
      message: { content: [{ id: "tool_456", name: "Grep", input: { pattern: "test.*pattern" } }] }
    };
    lineHandler(JSON.stringify(toolCall));

    // Should display them together
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Grep"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("✅"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Tool Result"));
  });
});

// Verifies ANSI color selection per message type, the trailing reset code,
// and the red error color used when an is_error tool result is rendered.
describe("color formatting", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should apply correct colors for different message types", () => {
    const messages = [
      { type: "system", expected: "\x1b[35m" }, // magenta
      { type: "user", expected: "\x1b[34m" }, // blue
      { type: "assistant", expected: "\x1b[32m" }, // green
      { type: "tool_use", expected: "\x1b[36m" }, // cyan
      { type: "tool_result", expected: "\x1b[33m" }, // yellow
    ];

    messages.forEach(({ type, expected }) => {
      mockStdoutWrite.mockClear();
      lineHandler(JSON.stringify({ type, message: "test" }));
      const calls = mockStdoutWrite.mock.calls;
      const hasExpectedColor = calls.some(call =>
        typeof call[0] === 'string' && call[0].includes(expected)
      );
      expect(hasExpectedColor).toBe(true);
    });
  });

  it("should reset colors properly", () => {
    const message = { type: "system", message: "test" };
    lineHandler(JSON.stringify(message));
    const calls = mockStdoutWrite.mock.calls;
    const hasResetColor = calls.some(call =>
      typeof call[0] === 'string' && call[0].includes("\x1b[0m")
    );
    expect(hasResetColor).toBe(true);
  });

  it("should use error colors when tool results are displayed", () => {
    // First send a tool call
    const toolCall = {
      type: "assistant",
      message: { content: [{ id: "tool_123", name: "Read", input: { file_path: "/test/file.ts" } }] }
    };
    lineHandler(JSON.stringify(toolCall));
    mockStdoutWrite.mockClear(); // Clear the tool call output

    // Then send an error result - this should display them together with error colors
    const errorResult = {
      type: "user",
      message: { content: [{ type: "tool_result", tool_use_id: "tool_123", content: "Error message", is_error: true }] }
    };
    lineHandler(JSON.stringify(errorResult));

    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("\x1b[31m"); // red color for ERROR
  });
});

// Verifies resilience to malformed input: invalid JSON (reported as
// "Parse Error" with the offending line echoed), empty lines, incomplete
// message structures, oversized payloads, and unusual value types.
describe("error handling", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should handle invalid JSON gracefully", () => {
    const invalidJson = "{ invalid json here";
    lineHandler(invalidJson);
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Parse Error"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("{ invalid json here"));
  });

  it("should handle empty lines gracefully", () => {
    lineHandler("");
    lineHandler(" ");
    lineHandler("\n");
    // Should not throw or produce error output for empty lines
    const errorCalls = mockStdoutWrite.mock.calls.filter(call =>
      typeof call[0] === 'string' && call[0].includes("Parse Error")
    );
    expect(errorCalls).toHaveLength(0);
  });

  it("should handle malformed message structures", () => {
    const malformedMessages = [
      { type: "assistant" }, // missing message
      { type: "user", message: null }, // null message
      { type: "tool_result" }, // missing required fields
    ];
    malformedMessages.forEach(message => {
      expect(() => { lineHandler(JSON.stringify(message)); }).not.toThrow();
    });
  });

  it("should handle JSON with syntax errors", () => {
    // NOTE(review): 'null' parses successfully via JSON.parse; this test
    // asserts the implementation still reports it as a Parse Error — confirm
    // the implementation treats non-object payloads as parse failures.
    const malformedJsons = [
      '{"type": "system", "message": "incomplete',
      '{"type": "user" "message": "missing colon"}',
      '{"type": "assistant", "message": {"content": [{"text": "unclosed quote}]}}',
      '{"type": []}', // invalid type
      'null',
      'undefined',
      '{"type": "system", "message": "test", }', // trailing comma
    ];
    malformedJsons.forEach(json => {
      mockStdoutWrite.mockClear();
      lineHandler(json);
      expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Parse Error"));
    });
  });

  it("should handle extremely long lines gracefully", () => {
    const longMessage = "a".repeat(10000);
    const longJson = JSON.stringify({ type: "system", message: longMessage });
    expect(() => { lineHandler(longJson); }).not.toThrow();
    // Should process the message successfully even if it's large
    expect(mockStdoutWrite).toHaveBeenCalled();
  });

  it("should handle non-string JSON values in content", () => {
    const messageWithNumbers = {
      type: "user",
      message: {
        content: [{
          text: 12345 // number instead of string
        }]
      }
    };
    expect(() => {
      lineHandler(JSON.stringify(messageWithNumbers));
}).not.toThrow();
  });

  it("should handle circular reference errors gracefully", () => {
    // Create a message that would cause issues when stringifying internally
    const messageWithUndefined = {
      type: "system",
      message: undefined,
      subtype: "test"
    };
    expect(() => {
      lineHandler(JSON.stringify(messageWithUndefined));
    }).not.toThrow();
  });
});

// Verifies the summary printed when stdin closes: the last assistant text
// message is echoed in full, unless the last message was a tool call or
// carried no text content.
describe("final message handling", () => {
  let lineHandler: Function;
  let closeHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
    // The "close" event callback fires when stdin ends.
    closeHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "close"
    )[1];
  });

  it("should display final assistant message on close", () => {
    const finalMessage = {
      type: "assistant",
      message: {
        content: [{ type: "text", text: "This is the final assistant message that should be displayed fully." }]
      }
    };
    lineHandler(JSON.stringify(finalMessage));
    closeHandler();
    expect(mockStdoutWrite).toHaveBeenCalledWith(
      expect.stringContaining("=== Final Assistant Message ===")
    );
    expect(mockStdoutWrite).toHaveBeenCalledWith(
      expect.stringContaining("This is the final assistant message that should be displayed fully.")
    );
  });

  it("should not display final message if last was a tool call", () => {
    const toolCall = {
      type: "assistant",
      message: { content: [{ id: "tool_123", name: "Read", input: { file_path: "/test" } }] }
    };
    lineHandler(JSON.stringify(toolCall));
    mockStdoutWrite.mockClear();
    closeHandler();
    const finalMessageCalls = mockStdoutWrite.mock.calls.filter(call =>
      typeof call[0] === 'string' && call[0].includes("=== Final Assistant Message ===")
    );
    expect(finalMessageCalls).toHaveLength(0);
  });

  it("should not display final message if no text content", () => {
    const messageWithoutText = {
      type: "assistant",
      message: { content: [{ type: "image", data: "base64data" }] }
    };
    lineHandler(JSON.stringify(messageWithoutText));
    mockStdoutWrite.mockClear();
    closeHandler();
    const finalMessageCalls = mockStdoutWrite.mock.calls.filter(call =>
      typeof call[0] === 'string' && call[0].includes("=== Final Assistant Message ===")
    );
    expect(finalMessageCalls).toHaveLength(0);
  });
});

// Verifies truncation of long content ("..." markers), per-message token
// usage display (input/output), and formatting of Unicode, escape
// sequences, and mixed content-item lists.
describe("message content truncation and formatting", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should truncate long user messages", () => {
    const longMessage = "a".repeat(100);
    const userMessage = { type: "user", message: { content: [{ text: longMessage }] } };
    lineHandler(JSON.stringify(userMessage));
    const calls = mockStdoutWrite.mock.calls;
    const hasEllipsis = calls.some(call =>
      typeof call[0] === 'string' && call[0].includes("...")
    );
    expect(hasEllipsis).toBe(true);
  });

  it("should show limited lines from assistant messages", () => {
    const multilineMessage = {
      type: "assistant",
      message: { content: [{ type: "text", text: "Line 1\nLine 2\nLine 3\nLine 4\nLine 5" }] }
    };
    lineHandler(JSON.stringify(multilineMessage));
    const calls = mockStdoutWrite.mock.calls;
    const hasEllipsis = calls.some(call =>
      typeof call[0] === 'string' && call[0].includes("...")
    );
    expect(hasEllipsis).toBe(true);
  });

  it("should show usage statistics when available", () => {
    const messageWithUsage = {
      type: "assistant",
      message: {
        usage: { input_tokens: 150, output_tokens: 75 },
        content: [{ type: "text", text: "Response with usage stats" }]
      }
    };
    lineHandler(JSON.stringify(messageWithUsage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("150/75 tokens"));
  });

  it("should handle messages with only zero token usage", () => {
    const messageWithZeroUsage = {
      type: "assistant",
      message: {
        usage: { input_tokens: 0, output_tokens: 0 },
        content: [{ type: "text", text: "Response with zero usage" }]
      }
    };
    lineHandler(JSON.stringify(messageWithZeroUsage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("0/0 tokens"));
  });

  it("should handle partial usage statistics", () => {
    const messageWithPartialUsage = {
      type: "assistant",
      message: {
        usage: {
          input_tokens: 100 // missing output_tokens
        },
        content: [{ type: "text", text: "Response with partial usage" }]
      }
    };
    lineHandler(JSON.stringify(messageWithPartialUsage));
    // Missing output_tokens is rendered as 0.
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("100/0 tokens"));
  });

  it("should handle messages with Unicode characters", () => {
    const unicodeMessage = {
      type: "user",
      message: { content: [{ text: "Hello 🌍 World! 中文字符 Émojis 🎉 and more: ñáéíóú" }] }
    };
    lineHandler(JSON.stringify(unicodeMessage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Hello 🌍 World!"));
  });

  it("should handle newlines and special characters in content", () => {
    const specialCharMessage = {
      type: "assistant",
      message: {
        content: [{ type: "text", text: "Line with \\t tab\\nLine with \\r return\\nLine with \\\\backslash" }]
      }
    };
    lineHandler(JSON.stringify(specialCharMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Line with");
  });

  it("should format different content item types correctly", () => {
    const mixedContentMessage = {
      type: "assistant",
      message: {
        content: [
          { type: "text", text: "Text content" },
          { type: "image", source: { type: "base64", media_type: "image/png", data: "abc123" } },
          { type: "tool_use", id: "tool_1", name: "Read", input: { file_path: "/test" } }
        ]
      }
    };
    lineHandler(JSON.stringify(mixedContentMessage));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("3 content items"));
  });
});

// Verifies end-to-end processing of multi-line JSONL streams: sequential
// messages, interleaved tool calls/results, and streaming assistant updates.
describe("comprehensive JSONL parsing from stdin", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should process multiple JSONL lines sequentially", () => {
    const jsonlLines = [
      JSON.stringify({ type: "system", subtype: "init", message: "Starting session" }),
      JSON.stringify({
type: "user",
        message: { content: [{ text: "User request" }] }
      }),
      JSON.stringify({
        type: "assistant",
        message: { content: [{ type: "text", text: "Assistant response" }] }
      }),
      JSON.stringify({ type: "result", result: "Final output" })
    ];

    jsonlLines.forEach(line => { lineHandler(line); });

    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("System"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("User"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("Assistant"));
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("=== Final Result ==="));
  });

  it("should handle interleaved tool calls and results in JSONL stream", () => {
    // Two calls arrive before either result; both pairs must still render.
    const jsonlSequence = [
      JSON.stringify({
        type: "assistant",
        message: { content: [{ id: "tool_1", name: "Read", input: { file_path: "/test1.ts" } }] }
      }),
      JSON.stringify({
        type: "assistant",
        message: { content: [{ id: "tool_2", name: "Grep", input: { pattern: "test" } }] }
      }),
      JSON.stringify({
        type: "user",
        message: { content: [{ type: "tool_result", tool_use_id: "tool_1", content: "File contents 1", is_error: false }] }
      }),
      JSON.stringify({
        type: "user",
        message: { content: [{ type: "tool_result", tool_use_id: "tool_2", content: "Search results", is_error: false }] }
      })
    ];

    jsonlSequence.forEach(line => { lineHandler(line); });

    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Read");
    expect(outputStrings).toContain("Grep");
    expect(outputStrings).toContain("Tool Result");
  });

  it("should handle streaming assistant responses", () => {
    // Each progressively longer message is rendered as its own entry.
    const streamingMessages = [
      JSON.stringify({ type: "assistant", message: { content: [{ type: "text", text: "Let me help" }] } }),
      JSON.stringify({ type: "assistant", message: { content: [{ type: "text", text: "Let me help you" }] } }),
      JSON.stringify({ type: "assistant", message: { content: [{ type: "text", text: "Let me help you with that" }] } })
    ];

    streamingMessages.forEach(line => { lineHandler(line); });

    const assistantCalls = mockStdoutWrite.mock.calls.filter(call =>
      typeof call[0] === 'string' && call[0].includes('Assistant')
    );
    expect(assistantCalls).toHaveLength(3);
  });
});

// Verifies that every message type — including unknown custom ones — is
// rendered with its capitalized type label and never crashes the handler.
describe("different message types comprehensive coverage", () => {
  let lineHandler: Function;

  beforeEach(() => {
    visualize();
    lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];
  });

  it("should handle info message type", () => {
    const infoMessage = { type: "info", message: "Information message", level: "info" };
    lineHandler(JSON.stringify(infoMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Info");
  });

  it("should handle warning message type", () => {
    const warningMessage = { type: "warning", message: "Warning message", level: "warning" };
    lineHandler(JSON.stringify(warningMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Warning");
  });

  it("should handle error message type", () => {
    const errorMessage = {
      type: "error",
      message: "Error message",
      error: { code: "E001", description: "Something went wrong" }
    };
    lineHandler(JSON.stringify(errorMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Error");
  });

  it("should handle tool_use message type directly", () => {
    const toolUseMessage = { type: "tool_use", name: "DirectTool", input: { parameter: "value" } };
    lineHandler(JSON.stringify(toolUseMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Tool_use");
  });

  it("should handle debug message type", () => {
    const debugMessage = {
      type: "debug",
      message: "Debug information",
      details: { step: 1, context: "parsing" }
    };
    lineHandler(JSON.stringify(debugMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Debug");
  });

  it("should handle progress message type", () => {
    const progressMessage = {
      type: "progress",
      message: "Processing files",
      progress: { current: 5, total: 10, percentage: 50 }
    };
    lineHandler(JSON.stringify(progressMessage));
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Progress");
  });

  it("should handle unknown message types gracefully", () => {
    const unknownMessage = { type: "custom_type", message: "Custom message format", data: { custom: "data" } };
    expect(() => { lineHandler(JSON.stringify(unknownMessage)); }).not.toThrow();
    const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join('');
    expect(outputStrings).toContain("Custom_type");
  });
});

// Verifies throughput and internal state: rapid input bursts, very large
// payloads, and correct call/result pairing across interleaved pairs.
describe("stream processing and buffering", () => {
  it("should handle rapid successive JSONL lines", () => {
    visualize();
    const lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];

    // Simulate rapid input
    const rapidMessages = Array.from({ length: 100 }, (_, i) =>
      JSON.stringify({ type: "system", message: `Message ${i}` })
    );
    rapidMessages.forEach(line => { lineHandler(line); });

    // Should handle all messages without errors
    expect(mockStdoutWrite.mock.calls.length).toBeGreaterThan(0);
  });

  it("should handle large JSON objects in stream", () => {
    visualize();
    const lineHandler = mockReadlineInterface.on.mock.calls.find(
      (call) => call[0] === "line"
    )[1];

    // Create a large message
    const largeContent = "Large content ".repeat(1000);
    const largeMessage = {
      type: "assistant",
      message: {
        content: [{ type: "text", text: largeContent }],
        usage: { input_tokens: 5000, output_tokens: 3000 }
      }
    };
    expect(() => { lineHandler(JSON.stringify(largeMessage)); }).not.toThrow();
    expect(mockStdoutWrite).toHaveBeenCalledWith(expect.stringContaining("5000/3000 tokens"));
  });

  it("should maintain state across multiple tool call/result pairs", () => {
    visualize();
const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; // Send multiple interleaved tool calls and results const toolCall1 = JSON.stringify({ type: "assistant", message: { content: [{ id: "tool_1", name: "Read", input: { file_path: "/file1" } }] } }); const toolCall2 = JSON.stringify({ type: "assistant", message: { content: [{ id: "tool_2", name: "Edit", input: { file_path: "/file2" } }] } }); const result1 = JSON.stringify({ type: "user", message: { content: [{ type: "tool_result", tool_use_id: "tool_1", content: "File 1 content", is_error: false }] } }); const result2 = JSON.stringify({ type: "user", message: { content: [{ type: "tool_result", tool_use_id: "tool_2", content: "File 2 edited", is_error: false }] } }); // Send in mixed order: call1, call2, result1, result2 lineHandler(toolCall1); lineHandler(toolCall2); mockStdoutWrite.mockClear(); lineHandler(result1); // Should display tool1 + result1 together let outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join(''); expect(outputStrings).toContain("Read"); expect(outputStrings).toContain("File 1 content"); mockStdoutWrite.mockClear(); lineHandler(result2); // Should display tool2 + result2 together outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join(''); expect(outputStrings).toContain("Edit"); expect(outputStrings).toContain("File 2 edited"); }); }); describe("comprehensive tool call variations", () => { let lineHandler: Function; beforeEach(() => { visualize(); lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; }); it("should handle Write tool calls", () => { const writeToolCall = { type: "assistant", message: { content: [{ id: "tool_write", name: "Write", input: { file_path: "/new/file.ts", content: "New file content" } }] } }; lineHandler(JSON.stringify(writeToolCall)); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("Write") ); 
expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("/new/file.ts") ); }); it("should handle Glob tool calls", () => { const globToolCall = { type: "assistant", message: { content: [{ id: "tool_glob", name: "Glob", input: { pattern: "**/*.ts", path: "/src" } }] } }; lineHandler(JSON.stringify(globToolCall)); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("Glob") ); // The implementation shows path first as the main argument, then pattern in details expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("/src") ); }); it("should handle WebFetch tool calls", () => { const webFetchCall = { type: "assistant", message: { content: [{ id: "tool_web", name: "WebFetch", input: { url: "https://example.com", prompt: "Extract the main content from this page" } }] } }; lineHandler(JSON.stringify(webFetchCall)); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("WebFetch") ); // The implementation shows prompt first as the main argument, not URL expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("Extract the main content") ); }); it("should handle tool calls with multiple complex parameters", () => { const complexToolCall = { type: "assistant", message: { content: [{ id: "tool_complex", name: "ComplexTool", input: { query: "search term", limit: 50, offset: 10, include: "*.json", timeout: 30000, old_string: "function oldName() { return 'old'; }", new_string: "function newName() { return 'new'; }" } }] } }; lineHandler(JSON.stringify(complexToolCall)); const outputStrings = mockStdoutWrite.mock.calls.map(call => call[0]).join(''); expect(outputStrings).toContain("ComplexTool"); expect(outputStrings).toContain("search term"); expect(outputStrings).toContain("limit: 50"); expect(outputStrings).toContain("offset: 10"); expect(outputStrings).toContain("timeout: 30000ms"); expect(outputStrings).toContain("replace:"); }); }); describe("debug mode comprehensive behavior", () => { it("should 
display timestamps in ISO format in debug mode", () => { const mockDate = new Date('2023-12-01T10:30:45.123Z'); vi.spyOn(global, 'Date').mockImplementation(() => mockDate); visualize({ debug: true }); const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; lineHandler(JSON.stringify({ type: "system", message: "test" })); expect(mockStdoutWrite).toHaveBeenCalledWith( expect.stringContaining("[2023-12-01T10:30:45.123Z]") ); vi.restoreAllMocks(); }); it("should handle debug mode with complex message sequences", () => { visualize({ debug: true }); const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; const complexSequence = [ JSON.stringify({ type: "system", subtype: "start", message: "Starting" }), JSON.stringify({ type: "assistant", message: { content: [{ id: "tool_1", name: "Read", input: { file_path: "/test" } }] } }), JSON.stringify({ type: "user", message: { content: [{ type: "tool_result", tool_use_id: "tool_1", content: "file content", is_error: false }] } }), JSON.stringify({ type: "assistant", message: { content: [{ type: "text", text: "Final response" }] } }) ]; complexSequence.forEach(line => { lineHandler(line); }); // All messages should have timestamps const timestampCalls = mockStdoutWrite.mock.calls.filter(call => typeof call[0] === 'string' && call[0].includes('[') ); expect(timestampCalls.length).toBeGreaterThan(0); }); it("should work correctly when both debug option and argv flag are present", () => { process.argv = ["node", "script.js", "--debug"]; visualize({ debug: true }); // Both option and argv const lineHandler = mockReadlineInterface.on.mock.calls.find( (call) => call[0] === "line" )[1]; lineHandler(JSON.stringify({ type: "system", message: "test" })); // Should still show timestamps (not double) const calls = mockStdoutWrite.mock.calls.filter(call => typeof call[0] === 'string' && call[0].includes('[') && call[0].includes(']') ); 
expect(calls.length).toBeGreaterThan(0); }); }); }); ================================================ FILE: tests/helpers/fixtures.ts ================================================ /** * Test fixtures and mock data for repomirror tests */ export const mockRepoConfig = { sourceRepo: "./", targetRepo: "../target", transformationInstructions: "transform python to typescript", }; export const mockTransformationPrompt = `Your job is to port ./ monorepo (Python) to ../target (TypeScript) and maintain the repository. You have access to the current ./ repository as well as the ../target repository. Make a commit and push your changes after every single file edit. Use the ../target/agent/ directory as a scratchpad for your work. Store long term plans and todo lists there. The original project was mostly tested by manually running the code. When porting, you will need to write end to end and unit tests for the project. But make sure to spend most of your time on the actual porting, not on the testing. 
A good heuristic is to spend 80% of your time on the actual porting, and 20% on the testing.`; export const mockSyncScript = `#!/bin/bash cat .repomirror/prompt.md | \\ claude -p --output-format=stream-json --verbose --dangerously-skip-permissions --add-dir ../target | \\ tee -a .repomirror/claude_output.jsonl | \\ npx repomirror visualize --debug;`; export const mockRalphScript = `#!/bin/bash while :; do ./.repomirror/sync.sh echo -e "===SLEEP===\\n===SLEEP===\\n"; echo 'looping'; sleep 10; done`; export const mockGitConfig = `[core] \trepositoryformatversion = 0 \tfilemode = true \tbare = false \tlogallrefupdates = true [remote "origin"] \turl = https://github.com/test/repo.git \tfetch = +refs/heads/*:refs/remotes/origin/*`; export const mockFileStructure = { "package.json": JSON.stringify({ name: "test-repo", version: "1.0.0", description: "Test repository" }, null, 2), "README.md": "# Test Repository\n\nThis is a test repository.", "src": { "index.ts": "export function hello() { return 'world'; }", "utils": { "helper.ts": "export function helper() { return true; }" } } }; export const mockClaudeOutput = { type: "result", is_error: false, result: mockTransformationPrompt }; export const mockCommandResponses = { "git rev-parse --git-dir": { stdout: ".git", exitCode: 0 }, "git remote -v": { stdout: "origin\thttps://github.com/test/repo.git (fetch)\norigin\thttps://github.com/test/repo.git (push)", exitCode: 0 }, "claude": { stdout: "Hi there! 
How can I help you today?", exitCode: 0 } }; export const mockInquirerResponses = { sourceRepo: "./", targetRepo: "../target", transformationInstructions: "transform python to typescript" }; ================================================ FILE: tests/helpers/index.ts ================================================ /** * Test helpers and utilities index */ export * from "./test-utils"; export * from "./fixtures"; ================================================ FILE: tests/helpers/test-utils.ts ================================================ import { vi } from "vitest"; import { promises as fs } from "fs"; import { join } from "path"; import { tmpdir } from "os"; /** * Test utility functions for repomirror tests */ /** * Create a temporary directory for testing */ export async function createTempDir(prefix: string = "repomirror-test-"): Promise { const tempPath = join(tmpdir(), `${prefix}${Date.now()}-${Math.random().toString(36).substring(7)}`); await fs.mkdir(tempPath, { recursive: true }); return tempPath; } /** * Clean up a temporary directory */ export async function cleanupTempDir(path: string): Promise { try { await fs.rm(path, { recursive: true, force: true }); } catch (error) { // Ignore cleanup errors in tests console.warn(`Failed to cleanup temp directory ${path}:`, error); } } /** * Create a mock git repository in a directory */ export async function createMockGitRepo(repoPath: string, withRemote: boolean = true): Promise { await fs.mkdir(join(repoPath, ".git"), { recursive: true }); // Create basic git config files await fs.writeFile(join(repoPath, ".git", "config"), `[core] \trepositoryformatversion = 0 \tfilemode = true \tbare = false \tlogallrefupdates = true ${withRemote ? 
`[remote "origin"] \turl = https://github.com/test/repo.git \tfetch = +refs/heads/*:refs/remotes/origin/*` : ''} `); await fs.writeFile(join(repoPath, ".git", "HEAD"), "ref: refs/heads/main\n"); // Create a simple file in the repo await fs.writeFile(join(repoPath, "README.md"), "# Test Repository\n"); } /** * Mock console methods for testing */ export function mockConsole() { const consoleMock = { log: vi.fn(), error: vi.fn(), warn: vi.fn(), info: vi.fn(), }; vi.spyOn(console, "log").mockImplementation(consoleMock.log); vi.spyOn(console, "error").mockImplementation(consoleMock.error); vi.spyOn(console, "warn").mockImplementation(consoleMock.warn); vi.spyOn(console, "info").mockImplementation(consoleMock.info); return consoleMock; } /** * Mock process methods for testing */ export function mockProcess(shouldThrowOnExit: boolean = true) { const processMock = { exit: shouldThrowOnExit ? vi.fn().mockImplementation((code?: number) => { throw new Error(`Process exit called with code ${code}`); }) : vi.fn(), cwd: vi.fn(), }; vi.spyOn(process, "exit").mockImplementation(processMock.exit as any); vi.spyOn(process, "cwd").mockImplementation(processMock.cwd); return processMock; } /** * Create a mock file structure */ export async function createMockFileStructure( basePath: string, structure: Record> ): Promise { for (const [name, content] of Object.entries(structure)) { const fullPath = join(basePath, name); if (typeof content === "string") { // It's a file await fs.mkdir(join(fullPath, ".."), { recursive: true }); await fs.writeFile(fullPath, content); } else { // It's a directory await fs.mkdir(fullPath, { recursive: true }); await createMockFileStructure(fullPath, content); } } } /** * Wait for a specified amount of time (for async tests) */ export function delay(ms: number): Promise { return new Promise(resolve => setTimeout(resolve, ms)); } /** * Mock inquirer prompts for testing */ export function mockInquirer(responses: Record) { return { prompt: 
vi.fn().mockResolvedValue(responses) }; } /** * Mock ora spinner for testing */ export function mockOra() { const spinnerMock = { start: vi.fn().mockReturnThis(), succeed: vi.fn().mockReturnThis(), fail: vi.fn().mockReturnThis(), stop: vi.fn().mockReturnThis(), }; return vi.fn().mockReturnValue(spinnerMock); } /** * Mock execa for command execution testing */ export function mockExeca(responses: Record = {}) { return vi.fn().mockImplementation((command: string, args: string[] = []) => { const fullCommand = `${command} ${args.join(" ")}`.trim(); const response = responses[fullCommand] || responses[command] || { stdout: "", exitCode: 0 }; if (response.exitCode && response.exitCode !== 0) { const error = new Error(`Command failed: ${fullCommand}`) as any; error.exitCode = response.exitCode; error.stderr = response.stderr || ""; throw error; } return Promise.resolve({ stdout: response.stdout || "", stderr: response.stderr || "", exitCode: response.exitCode || 0, }); }); } ================================================ FILE: tests/setup.ts ================================================ /** * Global test setup for vitest */ import { beforeEach, afterEach } from "vitest"; // Global test setup beforeEach(() => { // Reset environment variables or global state if needed }); afterEach(() => { // Clean up any global state after each test }); ================================================ FILE: tsconfig.json ================================================ { "compilerOptions": { "target": "ES2022", "module": "commonjs", "lib": ["ES2022"], "outDir": "./dist", "rootDir": "./src", "strict": true, "esModuleInterop": true, "skipLibCheck": true, "forceConsistentCasingInFileNames": true, "resolveJsonModule": true, "declaration": true, "declarationMap": true, "sourceMap": true, "moduleResolution": "node" }, "include": ["src/**/*"], "exclude": ["node_modules", "dist", "**/*.test.ts"] } ================================================ FILE: vitest.config.ts 
================================================ import { defineConfig } from "vitest/config"; import { resolve } from "path"; export default defineConfig({ test: { // Test environment configuration environment: "node", // Global test setup globals: true, setupFiles: ["./tests/setup.ts"], // Test file patterns include: [ "tests/**/*.{test,spec}.{js,ts}", "src/**/*.{test,spec}.{js,ts}" ], exclude: [ "**/node_modules/**", "**/dist/**", "**/.git/**" ], // Coverage configuration coverage: { provider: "v8", reporter: ["text", "json", "html"], reportsDirectory: "coverage", include: ["src/**/*.ts"], exclude: [ "src/**/*.{test,spec}.ts", "src/**/__tests__/**", "dist/**", "node_modules/**" ], thresholds: { global: { branches: 80, functions: 80, lines: 80, statements: 80 } } }, // Test timeouts testTimeout: 10000, hookTimeout: 10000, // Reporter configuration reporter: ["verbose"], // Watch mode configuration watch: false, // Concurrency settings maxConcurrency: 5 }, // Resolve configuration for imports resolve: { alias: { "@": resolve(__dirname, "./src"), "@tests": resolve(__dirname, "./tests") } }, // Define configuration for TypeScript esbuild: { target: "es2022" } });