[+] Force push detection & sync
Build executables / Windows x86_64 (push) Has been cancelled
Build executables / macOS universal (push) Has been cancelled
Build executables / Linux arm64 musl static (push) Has been cancelled
Build executables / Linux x86_64 musl static (push) Has been cancelled

This commit is contained in:
2026-05-11 06:04:23 +00:00
parent 3638d774ea
commit ae2bd9aaa1
3 changed files with 709 additions and 5 deletions
+2
View File
@@ -240,6 +240,8 @@ Conflict resolution strategies are configured per mirror group:
When a previously opened conflict pull request is merged, the next sync sees the merged branch as the winning tip, pushes it to the other endpoints, and closes stale `refray/conflicts/...` pull requests for that branch.
Force-pushes are propagated only when `refray` can infer intent from the previous successful sync state. If a branch previously matched everywhere, one endpoint has rewritten that branch to a non-descendant tip, and the other endpoints still have the previous tip, `refray` writes local backup refs and a bundle under the work-dir `backups/` directory before force-pushing the rewritten tip to the other endpoints. If multiple endpoints rewrite the branch differently, or another endpoint also advances independently, the branch is treated as a conflict and skipped.
Repository and branch deletion are propagated only when it is safe to infer intent, and `refray` writes local backup refs and bundle files under the work-dir `backups/` directory before propagating those deletions. If a repository existed on every endpoint in the previous successful sync, then disappears from one endpoint while the remaining endpoints still have the previous synced refs, `refray` deletes it from the remaining endpoints instead of recreating it when `delete_missing = true`. If `delete_missing = false`, that missing repository is not treated as a deletion and normal missing-repository handling applies. If the repository was deleted everywhere, `refray` removes its saved sync state after creating a local backup from the mirror cache. If the repository was deleted on one endpoint but changed elsewhere, it is treated as a conflict and skipped.
Branch deletion follows the same rule at branch scope: if a branch existed on every endpoint in the previous successful sync, then disappears from one endpoint while the remaining endpoints still have the previous tip, `refray` deletes it from the remaining endpoints instead of recreating it. If the branch was deleted on one endpoint but changed elsewhere, it is treated as a conflict and skipped.
+277 -3
View File
@@ -1399,6 +1399,27 @@ fn push_repo_refs(
fail_on_unresolved_conflict(context, "branch deletion conflict")?;
}
let (force_pushes, force_push_conflicts, force_push_branches) =
branch_force_push_decisions(mirror_repo, remotes, previous_refs, current_refs)?;
let had_force_push_conflicts = !force_push_conflicts.is_empty();
for conflict in &force_push_conflicts {
crate::logln!(
" {} branch {} has conflicting force-push changes on {} ({}, {})",
style("conflict").yellow().bold(),
style(&conflict.branch).cyan(),
conflict.remotes.join("+"),
conflict.reason,
style("skipped").dim()
);
}
if had_force_push_conflicts {
fail_on_unresolved_conflict(context, "branch force-push conflict")?;
}
let blocked_branches = blocked_branches
.union(&force_push_branches)
.cloned()
.collect::<BTreeSet<_>>();
let (branches, conflicts) = mirror_repo.branch_decisions(remotes)?;
let branches_to_push = branches
.into_iter()
@@ -1423,6 +1444,7 @@ fn push_repo_refs(
}
}
let had_branch_conflicts = !unresolved_branch_conflicts.is_empty();
let force_push_updates = force_push_updates(&force_pushes);
let unresolved_branch_names = unresolved_branch_conflicts
.iter()
.map(|conflict| conflict.branch.clone())
@@ -1463,13 +1485,17 @@ fn push_repo_refs(
let pushed_branch_names = branch_names(&branches_to_push);
let rebased_branch_names = branch_names_from_updates(&rebased_branch_updates);
let force_pushed_branch_names = branch_names_from_updates(&force_push_updates);
let mut cleanup_branches = stale_conflict_branches;
cleanup_branches.retain(|branch| {
!pushed_branch_names.contains(branch) && !rebased_branch_names.contains(branch)
!pushed_branch_names.contains(branch)
&& !rebased_branch_names.contains(branch)
&& !force_pushed_branch_names.contains(branch)
});
if branches_to_push.is_empty()
&& rebased_branch_updates.is_empty()
&& force_push_updates.is_empty()
&& tags_to_push.is_empty()
&& unresolved_branch_conflicts.is_empty()
{
@@ -1499,7 +1525,10 @@ fn push_repo_refs(
);
return Ok(RepoRefSyncResult {
pushed: false,
had_conflicts: had_branch_conflicts || had_tag_conflicts || had_deletion_conflicts,
had_conflicts: had_branch_conflicts
|| had_tag_conflicts
|| had_deletion_conflicts
|| had_force_push_conflicts,
});
}
if !branch_deletions.is_empty() {
@@ -1522,6 +1551,18 @@ fn push_repo_refs(
mirror_repo.push_branch_updates(remotes, &rebased_branch_updates)?;
close_resolved_pull_requests(context, mirror_repo, remotes, repos, &rebased_branch_names)?;
}
if !force_push_updates.is_empty() {
print_branch_force_pushes(&force_pushes);
backup_force_pushed_branches(context, mirror_repo, repo_name, &force_pushes, current_refs)?;
mirror_repo.push_branch_updates(remotes, &force_push_updates)?;
close_resolved_pull_requests(
context,
mirror_repo,
remotes,
repos,
&force_pushed_branch_names,
)?;
}
if !tags_to_push.is_empty() {
print_tag_decisions(&tags_to_push);
mirror_repo.push_tags(remotes, &tags_to_push)?;
@@ -1541,10 +1582,14 @@ fn push_repo_refs(
Ok(RepoRefSyncResult {
pushed: !branches_to_push.is_empty()
|| !rebased_branch_updates.is_empty()
|| !force_push_updates.is_empty()
|| !tags_to_push.is_empty()
|| !branch_deletions.is_empty()
|| !cleanup_branches.is_empty(),
had_conflicts: had_branch_conflicts || had_tag_conflicts || had_deletion_conflicts,
had_conflicts: had_branch_conflicts
|| had_tag_conflicts
|| had_deletion_conflicts
|| had_force_push_conflicts,
})
}
@@ -1576,6 +1621,34 @@ fn backup_deleted_branches(
Ok(())
}
/// Records a recovery point for every branch tip that is about to be
/// overwritten by a propagated force-push: backup refs in the mirror repo
/// plus a bundle file under the work-dir backup directory.
///
/// In dry-run mode only a log line is emitted and nothing is written.
/// Fails if no target branch refs could be resolved, because proceeding
/// would mean force-pushing with no backup at all.
fn backup_force_pushed_branches(
    context: &RepoSyncContext<'_>,
    mirror_repo: &GitMirror,
    repo_name: &str,
    force_pushes: &[BranchForcePush],
    current_refs: &BTreeMap<String, RemoteRefState>,
) -> Result<()> {
    if context.dry_run {
        let suffix = if force_pushes.len() == 1 { "" } else { "s" };
        crate::logln!(
            " {} {} force-push backup{}",
            style("dry-run").yellow().bold(),
            style("would create").dim(),
            suffix
        );
        return Ok(());
    }
    let stamp = backup_stamp()?;
    let ref_backups = force_push_ref_backups(force_pushes, current_refs, &stamp);
    if ref_backups.is_empty() {
        bail!("cannot back up force-push because no target branch refs were available");
    }
    let backed_up = mirror_repo.backup_refs(&ref_backups)?;
    let bundle_path = backup_dir(context, repo_name).join(format!("force-push-{stamp}.bundle"));
    mirror_repo.create_bundle(&bundle_path, &backed_up)?;
    Ok(())
}
fn backup_branches_deleted_everywhere(
context: &RepoSyncContext<'_>,
mirror_repo: &GitMirror,
@@ -1698,6 +1771,35 @@ fn log_rebase_decision(branch: &str, sha: &str, updates: &[BranchUpdate]) {
);
}
/// Logs one line per detected force-push, naming the branch and the
/// remotes it flows from and to.
fn print_branch_force_pushes(force_pushes: &[BranchForcePush]) {
    force_pushes.iter().for_each(|fp| {
        let sources = fp.source_remotes.join("+");
        let targets = fp.target_remotes.join("+");
        crate::logln!(
            " {} branch {} {} -> {}",
            style("force-push detected").green().bold(),
            style(&fp.branch).cyan(),
            sources,
            targets
        );
    });
}
/// Expands each detected force-push into one forced `BranchUpdate` per
/// target remote, every update pointing at the winning rewritten tip.
fn force_push_updates(force_pushes: &[BranchForcePush]) -> Vec<BranchUpdate> {
    let mut updates = Vec::new();
    for force_push in force_pushes {
        for target_remote in &force_push.target_remotes {
            updates.push(BranchUpdate {
                branch: force_push.branch.clone(),
                sha: force_push.sha.clone(),
                target_remote: target_remote.clone(),
                force: true,
            });
        }
    }
    updates
}
fn open_conflict_pull_requests(
context: &RepoSyncContext<'_>,
mirror_repo: &GitMirror,
@@ -1968,6 +2070,38 @@ fn branch_ref_backups(
backups
}
/// Builds backup-ref descriptors for every target remote's current tip of
/// each force-pushed branch, namespaced under
/// `refs/refray-backups/force-pushes/<branch>/<stamp>/<remote>`.
/// Remotes whose current branch ref cannot be resolved are skipped.
fn force_push_ref_backups(
    force_pushes: &[BranchForcePush],
    current_refs: &BTreeMap<String, RemoteRefState>,
    stamp: &str,
) -> Vec<RefBackup> {
    let mut backups = Vec::new();
    for force_push in force_pushes {
        let branch = &force_push.branch;
        for remote in &force_push.target_remotes {
            let current_sha = current_refs
                .get(remote)
                .and_then(|state| state.branches.get(branch));
            if let Some(sha) = current_sha {
                let refname = format!(
                    "refs/refray-backups/force-pushes/{}/{}/{}",
                    hex_component(branch),
                    stamp,
                    hex_component(remote)
                );
                backups.push(RefBackup {
                    refname,
                    sha: sha.clone(),
                    description: format!(
                        "branch {} from {} before propagated force-push",
                        branch, remote
                    ),
                });
            }
        }
    }
    backups
}
fn branches_deleted_everywhere_backups(
previous_refs: &BTreeMap<String, RemoteRefState>,
current_refs: &BTreeMap<String, RemoteRefState>,
@@ -2094,6 +2228,146 @@ fn safe_ref_component(value: &str) -> String {
output.trim_matches('-').to_string()
}
/// A force-push that is safe to propagate: exactly one rewritten tip that
/// should overwrite the previous tip on the remaining endpoints.
#[derive(Clone, Debug)]
struct BranchForcePush {
    // Name of the affected branch.
    branch: String,
    // The rewritten tip to force-push to every target remote.
    sha: String,
    // Remotes that already carry the rewritten tip.
    source_remotes: Vec<String>,
    // Remotes still at the previous tip, to be force-pushed.
    target_remotes: Vec<String>,
}
/// A branch whose rewrite cannot be propagated safely (multiple divergent
/// rewritten tips, or a rewrite mixed with independent fast-forwards).
// Derive Debug for parity with `BranchForcePush`, so conflicts can be
// inspected in diagnostics the same way decisions can.
#[derive(Debug)]
struct BranchForcePushConflict {
    // Name of the conflicted branch.
    branch: String,
    // Remotes that rewrote the branch.
    remotes: Vec<String>,
    // Human-readable explanation of why propagation was refused.
    reason: String,
}
/// Classifies branches from the previous successful sync as either a
/// propagatable force-push or a force-push conflict.
///
/// Returns `(decisions, conflicts, blocked)`, where `blocked` names every
/// branch classified either way so callers can exclude those branches from
/// the normal branch-sync path.
fn branch_force_push_decisions(
    mirror_repo: &GitMirror,
    remotes: &[RemoteSpec],
    previous_refs: Option<&BTreeMap<String, RemoteRefState>>,
    current_refs: &BTreeMap<String, RemoteRefState>,
) -> Result<(
    Vec<BranchForcePush>,
    Vec<BranchForcePushConflict>,
    BTreeSet<String>,
)> {
    // Without a previous successful sync there is no baseline from which to
    // infer force-push intent, so nothing is detected.
    let Some(previous_refs) = previous_refs else {
        return Ok((Vec::new(), Vec::new(), BTreeSet::new()));
    };
    let remote_names = remotes
        .iter()
        .map(|remote| remote.name.clone())
        .collect::<Vec<_>>();
    // Candidate set: every branch seen on any remote in the previous sync,
    // excluding refray's internal conflict branches.
    let mut branches = BTreeSet::new();
    for refs in previous_refs.values() {
        branches.extend(
            refs.branches
                .keys()
                .filter(|branch| !is_internal_conflict_branch(branch))
                .cloned(),
        );
    }
    let mut decisions = Vec::new();
    let mut conflicts = Vec::new();
    let mut blocked = BTreeSet::new();
    for branch in branches {
        let previous_by_remote = remote_names
            .iter()
            .filter_map(|remote| {
                previous_refs
                    .get(remote)
                    .and_then(|refs| refs.branches.get(&branch))
                    .map(|sha| (remote.clone(), sha.clone()))
            })
            .collect::<BTreeMap<_, _>>();
        // Only branches that previously existed on every remote qualify...
        if previous_by_remote.len() != remote_names.len() {
            continue;
        }
        let previous_tips = previous_by_remote
            .values()
            .cloned()
            .collect::<BTreeSet<_>>();
        // ...and previously agreed on a single tip everywhere.
        if previous_tips.len() != 1 {
            continue;
        }
        let current_by_remote = remote_names
            .iter()
            .filter_map(|remote| {
                current_refs
                    .get(remote)
                    .and_then(|refs| refs.branches.get(&branch))
                    .map(|sha| (remote.clone(), sha.clone()))
            })
            .collect::<BTreeMap<_, _>>();
        // The branch must still exist on every remote now; disappearances
        // are handled by the branch-deletion logic, not here.
        if current_by_remote.len() != remote_names.len() {
            continue;
        }
        // Bucket each remote by how its tip moved since the last sync:
        // unchanged (potential force-push target), fast-forwarded, or
        // rewritten to a non-descendant tip (grouped by that new tip).
        let mut target_remotes = Vec::new();
        let mut fast_forward_remotes = Vec::new();
        let mut force_pushed_by_tip = BTreeMap::<String, Vec<String>>::new();
        for remote in &remote_names {
            let previous = &previous_by_remote[remote];
            let current = &current_by_remote[remote];
            if previous == current {
                target_remotes.push(remote.clone());
            } else if mirror_repo.is_ancestor(previous, current)? {
                fast_forward_remotes.push(remote.clone());
            } else {
                force_pushed_by_tip
                    .entry(current.clone())
                    .or_default()
                    .push(remote.clone());
            }
        }
        // No remote rewrote the branch: nothing force-push-related to do.
        if force_pushed_by_tip.is_empty() {
            continue;
        }
        let force_pushed_remotes = force_pushed_by_tip
            .values()
            .flat_map(|remotes| remotes.iter().cloned())
            .collect::<Vec<_>>();
        // Exactly one rewritten tip and no independent fast-forwards:
        // intent is unambiguous, so propagate the rewrite.
        if force_pushed_by_tip.len() == 1 && fast_forward_remotes.is_empty() {
            let (sha, source_remotes) = force_pushed_by_tip.into_iter().next().unwrap();
            // Every remote already carries the rewritten tip: nothing left
            // to push and nothing to block.
            if target_remotes.is_empty() {
                continue;
            }
            blocked.insert(branch.clone());
            decisions.push(BranchForcePush {
                branch,
                sha,
                source_remotes,
                target_remotes,
            });
            continue;
        }
        // Ambiguous rewrite: record a conflict with a reason describing
        // which combination of rewrites/fast-forwards blocked propagation.
        blocked.insert(branch.clone());
        let reason = if force_pushed_by_tip.len() > 1 && !fast_forward_remotes.is_empty() {
            format!(
                "multiple rewritten tips and fast-forward changes on {}",
                fast_forward_remotes.join("+")
            )
        } else if force_pushed_by_tip.len() > 1 {
            "multiple rewritten tips".to_string()
        } else {
            format!("also fast-forwarded on {}", fast_forward_remotes.join("+"))
        };
        conflicts.push(BranchForcePushConflict {
            branch,
            remotes: force_pushed_remotes,
            reason,
        });
    }
    Ok((decisions, conflicts, blocked))
}
struct BranchDeletionConflict {
branch: String,
deleted_remotes: Vec<String>,
+430 -2
View File
@@ -25,8 +25,7 @@ const WEBHOOK_SECRET: &str = "refray-e2e-secret";
#[test]
#[ignore = "destructive live-provider e2e test; run explicitly with --ignored"]
fn sequential_live_e2e_all_supported_features() -> Result<()> {
let env = EnvFile::load(Path::new(".env"))?;
let settings = E2eSettings::from_env(&env)?;
let settings = load_e2e_settings()?;
settings.require_destructive_guard()?;
let mut run = E2eRun::new(settings)?;
@@ -59,6 +58,42 @@ fn sequential_live_e2e_all_supported_features() -> Result<()> {
Ok(())
}
/// Live-provider e2e coverage for force-push detection: rewind, rewrite,
/// fast-forward guard, conflicting rewrites, rewrite-plus-fast-forward
/// conflict, and a feature-branch rewrite, run in sequence.
#[test]
#[ignore = "destructive live-provider e2e test; run explicitly with --ignored"]
fn sequential_live_e2e_force_push_detection() -> Result<()> {
    let settings = load_e2e_settings()?;
    settings.require_destructive_guard()?;
    let mut run = E2eRun::new(settings)?;
    // Reset provider state and write the sync config before any phase runs.
    run.preflight()?;
    run.clear_repositories()?;
    run.write_config(ConflictMode::AutoRebasePullRequest, None, true)?;
    eprintln!("e2e phase: force-push rewind");
    run.rewind_force_push_propagates()?;
    eprintln!("e2e phase: force-push rewrite");
    run.rewrite_force_push_propagates()?;
    eprintln!("e2e phase: force-push fast-forward guard");
    run.normal_fast_forward_still_syncs()?;
    eprintln!("e2e phase: force-push conflict");
    run.conflicting_force_pushes_are_not_propagated()?;
    eprintln!("e2e phase: force-push plus fast-forward conflict");
    run.force_push_plus_fast_forward_is_not_propagated()?;
    eprintln!("e2e phase: feature branch force-push");
    run.feature_branch_force_push_propagates()?;
    run.clear_e2e_repositories()?;
    Ok(())
}
/// Loads the e2e settings from the env file named by `REFRAY_E2E_ENV_FILE`,
/// falling back to `.env` in the current directory.
fn load_e2e_settings() -> Result<E2eSettings> {
    let env_path = match std::env::var_os("REFRAY_E2E_ENV_FILE") {
        Some(path) => PathBuf::from(path),
        None => PathBuf::from(".env"),
    };
    let env = EnvFile::load(&env_path)?;
    E2eSettings::from_env(&env)
}
/// Key/value pairs loaded from an env file (e.g. `.env`).
struct EnvFile {
    // Parsed KEY=VALUE entries.
    values: HashMap<String, String>,
}
@@ -637,6 +672,218 @@ namespace = "{}"
Ok(())
}
/// Scenario: `main` is force-pushed back to an earlier commit (a rewind)
/// on the primary provider; the rewind must propagate to all endpoints and
/// the overwritten tip must land in a backup bundle.
fn rewind_force_push_propagates(&self) -> Result<()> {
    let repo = self.repo_name("force-rewind");
    let source = self.primary_provider();
    // Establish a synced baseline on every endpoint.
    self.seed_all_main(&repo, "force rewind base", 1_700_001_701)?;
    self.sync_repo(&repo, [])?;
    let base = self.branch_sha(source, &repo, MAIN_BRANCH)?;
    // Advance main by one commit and sync so every endpoint sits at `old`.
    let old = self.commit_to_provider_branch(
        source,
        &repo,
        MAIN_BRANCH,
        "old.txt",
        "old\n",
        "force rewind old",
        1_700_001_702,
    )?;
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &old)?;
    // Rewind main on the source back to the baseline commit.
    self.unprotect_main_all(&repo)?;
    self.force_push_provider_branch_to_sha(source, &repo, MAIN_BRANCH, &base)?;
    // The rewind propagates everywhere and the overwritten tip is backed up.
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &base)?;
    self.assert_backup_bundle_contains(&repo, &old)?;
    Ok(())
}
/// Scenario: `main` is rewritten from its base to a new non-descendant
/// commit on the primary provider; the rewritten tip must propagate and
/// the overwritten tip must land in a backup bundle.
fn rewrite_force_push_propagates(&self) -> Result<()> {
    let repo = self.repo_name("force-rewrite");
    let source = self.primary_provider();
    // Establish a synced baseline on every endpoint.
    self.seed_all_main(&repo, "force rewrite base", 1_700_001_711)?;
    self.sync_repo(&repo, [])?;
    let base = self.branch_sha(source, &repo, MAIN_BRANCH)?;
    // Advance main by one commit and sync so every endpoint sits at `old`.
    let old = self.commit_to_provider_branch(
        source,
        &repo,
        MAIN_BRANCH,
        "old.txt",
        "old\n",
        "force rewrite old",
        1_700_001_712,
    )?;
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &old)?;
    // Rewrite main on the source: new commit on top of `base`, force-pushed.
    self.unprotect_main_all(&repo)?;
    let rewritten = self.force_rewrite_provider_branch_from(
        source,
        &repo,
        MAIN_BRANCH,
        &base,
        "rewritten.txt",
        "rewritten\n",
        "force rewrite new",
        1_700_001_713,
    )?;
    // The rewrite propagates everywhere and the old tip is backed up.
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &rewritten)?;
    self.assert_backup_bundle_contains(&repo, &old)?;
    Ok(())
}
/// Guard scenario: a plain fast-forward commit must still sync normally
/// and not be mistaken for a force-push.
fn normal_fast_forward_still_syncs(&self) -> Result<()> {
    let repo = self.repo_name("force-fast-forward");
    let source = self.primary_provider();
    // Establish a synced baseline on every endpoint.
    self.seed_all_main(&repo, "force fast-forward base", 1_700_001_721)?;
    self.sync_repo(&repo, [])?;
    // Add a normal fast-forward commit on the source.
    let newer = self.commit_to_provider_branch(
        source,
        &repo,
        MAIN_BRANCH,
        "newer.txt",
        "newer\n",
        "normal fast-forward",
        1_700_001_722,
    )?;
    // It should sync to every endpoint as usual.
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &newer)
}
/// Scenario: two endpoints rewrite `main` divergently from the same base;
/// the sync must treat this as a conflict, fail (under `ConflictMode::Fail`),
/// and leave every endpoint's refs untouched.
fn conflicting_force_pushes_are_not_propagated(&self) -> Result<()> {
    let repo = self.repo_name("force-conflict");
    let (source, peer) = self.provider_pair();
    // Establish a synced baseline on every endpoint.
    self.seed_all_main(&repo, "force conflict base", 1_700_001_731)?;
    self.sync_repo(&repo, [])?;
    let base = self.branch_sha(source, &repo, MAIN_BRANCH)?;
    // Advance main by one commit and sync so every endpoint sits at `old`.
    let old = self.commit_to_provider_branch(
        source,
        &repo,
        MAIN_BRANCH,
        "old.txt",
        "old\n",
        "force conflict old",
        1_700_001_732,
    )?;
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &old)?;
    self.unprotect_main_all(&repo)?;
    // Rewrite main divergently on both providers from the same base.
    self.force_rewrite_provider_branch_from(
        source,
        &repo,
        MAIN_BRANCH,
        &base,
        "source.txt",
        "source\n",
        "source force rewrite",
        1_700_001_733,
    )?;
    self.force_rewrite_provider_branch_from(
        peer,
        &repo,
        MAIN_BRANCH,
        &base,
        "peer.txt",
        "peer\n",
        "peer force rewrite",
        1_700_001_734,
    )?;
    // Snapshot refs, run with fail-on-conflict, and verify nothing moved.
    let expected_refs = self.branch_refs_by_provider(&repo, MAIN_BRANCH)?;
    self.write_config(ConflictMode::Fail, Some(&exact_pattern(&repo)), true)?;
    self.sync_repo_expect_failure(&repo, [])?;
    self.assert_branch_refs_match(&repo, MAIN_BRANCH, &expected_refs)?;
    // Restore the default config for subsequent phases.
    self.write_config(ConflictMode::AutoRebasePullRequest, None, true)?;
    Ok(())
}
/// Scenario: one endpoint rewrites `main` while another fast-forwards it;
/// the sync must treat this as a conflict, fail (under `ConflictMode::Fail`),
/// and leave every endpoint's refs untouched.
fn force_push_plus_fast_forward_is_not_propagated(&self) -> Result<()> {
    let repo = self.repo_name("force-plus-fast-forward");
    let (source, peer) = self.provider_pair();
    // Establish a synced baseline on every endpoint.
    self.seed_all_main(&repo, "force plus fast-forward base", 1_700_001_741)?;
    self.sync_repo(&repo, [])?;
    let base = self.branch_sha(source, &repo, MAIN_BRANCH)?;
    // Advance main by one commit and sync so every endpoint sits at `old`.
    let old = self.commit_to_provider_branch(
        source,
        &repo,
        MAIN_BRANCH,
        "old.txt",
        "old\n",
        "force plus fast-forward old",
        1_700_001_742,
    )?;
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &old)?;
    self.unprotect_main_all(&repo)?;
    // Rewrite main on the source from the base...
    self.force_rewrite_provider_branch_from(
        source,
        &repo,
        MAIN_BRANCH,
        &base,
        "rewritten.txt",
        "rewritten\n",
        "force plus fast-forward rewrite",
        1_700_001_743,
    )?;
    // ...while the peer fast-forwards the same branch independently.
    self.commit_to_provider_branch(
        peer,
        &repo,
        MAIN_BRANCH,
        "peer-fast-forward.txt",
        "peer fast-forward\n",
        "peer fast-forward",
        1_700_001_744,
    )?;
    // Snapshot refs, run with fail-on-conflict, and verify nothing moved.
    let expected_refs = self.branch_refs_by_provider(&repo, MAIN_BRANCH)?;
    self.write_config(ConflictMode::Fail, Some(&exact_pattern(&repo)), true)?;
    self.sync_repo_expect_failure(&repo, [])?;
    self.assert_branch_refs_match(&repo, MAIN_BRANCH, &expected_refs)?;
    // Restore the default config for subsequent phases.
    self.write_config(ConflictMode::AutoRebasePullRequest, None, true)?;
    Ok(())
}
/// Scenario: a non-main feature branch is rewritten on the primary
/// provider; the rewrite must propagate to that branch on all endpoints
/// while `main` stays put, and the old feature tip must be backed up.
fn feature_branch_force_push_propagates(&self) -> Result<()> {
    let repo = self.repo_name("force-feature");
    let source = self.primary_provider();
    let branch = "feature/force-push";
    // Establish a synced baseline on main.
    self.seed_all_main(&repo, "force feature base", 1_700_001_751)?;
    self.sync_repo(&repo, [])?;
    let main = self.branch_sha(source, &repo, MAIN_BRANCH)?;
    // Create the feature branch with one commit and sync it everywhere.
    let old_feature = self.create_provider_branch(
        source,
        &repo,
        MAIN_BRANCH,
        branch,
        "feature.txt",
        "feature\n",
        "feature branch old",
        1_700_001_752,
    )?;
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, branch, &old_feature)?;
    // Rewrite the feature branch from main's tip on the source.
    let rewritten_feature = self.force_rewrite_provider_branch_from(
        source,
        &repo,
        branch,
        &main,
        "feature-rewritten.txt",
        "feature rewritten\n",
        "feature branch rewrite",
        1_700_001_753,
    )?;
    // Rewrite propagates for the feature branch only; main is untouched.
    self.sync_repo(&repo, [])?;
    self.assert_branch_all_at(&repo, branch, &rewritten_feature)?;
    self.assert_branch_all_at(&repo, MAIN_BRANCH, &main)?;
    self.assert_backup_bundle_contains(&repo, &old_feature)?;
    Ok(())
}
fn webhook_commands_and_receiver_work(&self) -> Result<()> {
let repo = self.repo_name("webhook");
let source = self.primary_provider();
@@ -765,6 +1012,129 @@ namespace = "{}"
Ok(())
}
/// Adds one commit (writing `contents` to `path`) on `branch` via a fresh
/// clone, pushes it to the provider, and waits until the provider reports
/// the new tip. Returns the new commit sha.
#[allow(clippy::too_many_arguments)]
fn commit_to_provider_branch(
    &self,
    provider: &ProviderAccount,
    repo: &str,
    branch: &str,
    path: &str,
    contents: &str,
    message: &str,
    timestamp: i64,
) -> Result<String> {
    let label = format!(
        "commit-{}-{}-{repo}",
        provider.site_name,
        sanitize_path(branch)
    );
    let workdir = self.clone_repo(provider, repo, &label)?;
    self.checkout_remote_branch(&workdir, branch)?;
    write_commit(&workdir, path, contents, message, timestamp)?;
    let sha = git_output(&workdir, ["rev-parse", "HEAD"])?;
    let refspec = format!("HEAD:{branch}");
    self.git(&workdir, ["push", "origin", &refspec])?;
    provider.wait_branch(repo, branch, &sha)?;
    provider.wait_repo_listed(repo)?;
    Ok(sha)
}
/// Creates `branch` from `base_branch`, adds one commit (writing
/// `contents` to `path`), pushes the branch, and waits until the provider
/// reports the new tip. Returns the new commit sha.
#[allow(clippy::too_many_arguments)]
fn create_provider_branch(
    &self,
    provider: &ProviderAccount,
    repo: &str,
    base_branch: &str,
    branch: &str,
    path: &str,
    contents: &str,
    message: &str,
    timestamp: i64,
) -> Result<String> {
    let label = format!(
        "branch-{}-{}-{repo}",
        provider.site_name,
        sanitize_path(branch)
    );
    let workdir = self.clone_repo(provider, repo, &label)?;
    let base_ref = format!("origin/{base_branch}");
    self.git(&workdir, ["checkout", "-B", branch, &base_ref])?;
    write_commit(&workdir, path, contents, message, timestamp)?;
    let sha = git_output(&workdir, ["rev-parse", "HEAD"])?;
    let refspec = format!("HEAD:{branch}");
    self.git(&workdir, ["push", "origin", &refspec])?;
    provider.wait_branch(repo, branch, &sha)?;
    provider.wait_repo_listed(repo)?;
    Ok(sha)
}
/// Force-pushes `branch` on `provider` back to an existing commit `sha`
/// (via a clone plus hard reset) and waits until the provider reports
/// that tip.
fn force_push_provider_branch_to_sha(
    &self,
    provider: &ProviderAccount,
    repo: &str,
    branch: &str,
    sha: &str,
) -> Result<()> {
    let label = format!(
        "force-to-{}-{}-{repo}",
        provider.site_name,
        sanitize_path(branch)
    );
    let workdir = self.clone_repo(provider, repo, &label)?;
    self.checkout_remote_branch(&workdir, branch)?;
    self.git(&workdir, ["reset", "--hard", sha])?;
    let refspec = format!("HEAD:{branch}");
    self.git(&workdir, ["push", "--force", "origin", &refspec])?;
    provider.wait_branch(repo, branch, sha)?;
    provider.wait_repo_listed(repo)
}
/// Rewrites `branch` on `provider`: hard-resets to `base_sha`, adds one
/// replacement commit (writing `contents` to `path`), force-pushes, and
/// waits until the provider reports the new tip. Returns the new sha.
#[allow(clippy::too_many_arguments)]
fn force_rewrite_provider_branch_from(
    &self,
    provider: &ProviderAccount,
    repo: &str,
    branch: &str,
    base_sha: &str,
    path: &str,
    contents: &str,
    message: &str,
    timestamp: i64,
) -> Result<String> {
    let label = format!(
        "force-rewrite-{}-{}-{repo}",
        provider.site_name,
        sanitize_path(branch)
    );
    let workdir = self.clone_repo(provider, repo, &label)?;
    self.checkout_remote_branch(&workdir, branch)?;
    self.git(&workdir, ["reset", "--hard", base_sha])?;
    write_commit(&workdir, path, contents, message, timestamp)?;
    let sha = git_output(&workdir, ["rev-parse", "HEAD"])?;
    let refspec = format!("HEAD:{branch}");
    self.git(&workdir, ["push", "--force", "origin", &refspec])?;
    provider.wait_branch(repo, branch, &sha)?;
    provider.wait_repo_listed(repo)?;
    Ok(sha)
}
/// Checks out `branch` in `work`, (re)creating the local branch at the
/// remote-tracking ref `origin/<branch>`.
fn checkout_remote_branch(&self, work: &Path, branch: &str) -> Result<()> {
    let tracking_ref = format!("origin/{branch}");
    self.git(work, ["checkout", "-B", branch, &tracking_ref])
}
fn clone_repo(&self, provider: &ProviderAccount, repo: &str, label: &str) -> Result<PathBuf> {
let path = self.git_worktree(label)?;
let remote_url = provider.authenticated_repo_url(repo)?;
@@ -1058,6 +1428,34 @@ namespace = "{}"
})
}
/// Retries until `branch` reports the `expected` sha on every provider,
/// failing with the first provider that disagrees.
fn assert_branch_all_at(&self, repo: &str, branch: &str, expected: &str) -> Result<()> {
    retry("branch convergence to expected tip", || {
        let mismatch = self
            .branch_refs_by_provider(repo, branch)?
            .into_iter()
            .find(|(_, actual)| actual != expected);
        if let Some((provider, actual)) = mismatch {
            bail!("branch {branch} on {provider} is at {actual}, expected {expected}");
        }
        Ok(())
    })
}
/// Retries until the per-provider refs of `branch` exactly equal the
/// `expected` snapshot, failing if they have changed.
fn assert_branch_refs_match(
    &self,
    repo: &str,
    branch: &str,
    expected: &BTreeMap<String, String>,
) -> Result<()> {
    retry("branch refs unchanged", || {
        let actual = self.branch_refs_by_provider(repo, branch)?;
        if &actual == expected {
            Ok(())
        } else {
            bail!(
                "branch {branch} refs changed unexpectedly for {repo}: expected {expected:?}, got {actual:?}"
            )
        }
    })
}
fn assert_branch_all_equal_after_optional_resync(
&self,
repo: &str,
@@ -1224,6 +1622,36 @@ namespace = "{}"
Ok(output)
}
/// Collects the sha of `branch` on every provider, erroring if any
/// provider is missing the branch.
fn branch_refs_by_provider(
    &self,
    repo: &str,
    branch: &str,
) -> Result<BTreeMap<String, String>> {
    self.refs_by_provider(repo)?
        .into_iter()
        .map(|(provider, refs)| {
            let sha = refs
                .branches
                .get(branch)
                .cloned()
                .ok_or_else(|| anyhow!("branch {branch} missing on {provider} for repo {repo}"))?;
            Ok((provider, sha))
        })
        .collect()
}
/// Looks up the current sha of `branch` on a single provider via
/// `ls_remote`, erroring if the branch does not exist there.
fn branch_sha(&self, provider: &ProviderAccount, repo: &str, branch: &str) -> Result<String> {
    let refs = provider.ls_remote(repo)?;
    match refs.branches.get(branch) {
        Some(sha) => Ok(sha.clone()),
        None => Err(anyhow!(
            "branch {branch} missing on {} for repo {repo}",
            provider.site_name
        )),
    }
}
fn unprotect_main_all(&self, repo: &str) -> Result<()> {
for provider in &self.settings.providers {
provider.unprotect_branch(repo, MAIN_BRANCH)?;