Always check package status when version is not pinned

When the version isn't a git sha or a tag, we always check that we have
  the latest version of a particular dependency before building. This is
  to avoid those awkward moments where someone tries to use something from
  the stdlib that is brand new and, despite using 'main', gets a strange
  build failure about it not being available.

  An important note is that we don't actually re-download the package
  in that case; we merely check an HTTP ETag obtained from a (cheap) 'HEAD'
  request to the package registry. If the ETag hasn't changed, then the
  local version is still up to date.
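
  For illustration only, the freshness check amounts to something along
  these lines; the helper name, the reqwest-based client, and how the
  cached ETag is stored are assumptions made for the sketch, not code
  from this commit.

  use reqwest::{header::ETAG, Client};

  /// Compare the ETag recorded when the package was last fetched with the
  /// one currently advertised by the registry. A matching ETag means the
  /// local copy is still current, so no re-download is needed.
  async fn is_cache_fresh(
      client: &Client,
      package_url: &str,         // registry URL of the package archive
      cached_etag: Option<&str>, // ETag stored alongside the cached package
  ) -> Result<bool, reqwest::Error> {
      // A HEAD request only transfers headers, so it stays cheap.
      let response = client.head(package_url).send().await?;

      let remote_etag = response
          .headers()
          .get(ETAG)
          .and_then(|value| value.to_str().ok());

      // Only treat the cache as fresh when both ETags exist and match.
      Ok(matches!((cached_etag, remote_etag), (Some(a), Some(b)) if a == b))
  }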

  The behavior is completely bypassed if the version is specified using
  a git sha or a tag since, in that case, we can assume that fetching it
  once is enough (and that it won't change). If a package maintainer
  force-pushed a tag, however, there may be a discrepancy, and the only
  way around that is to `rm -r ./build`, as sketched after this note.
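
  (For illustration only, the pinned-or-not decision boils down to a
  predicate along these lines; the helper name and the exact notion of a
  "tag" here are hypothetical, not code from this commit.)

  /// Hypothetical sketch: a version counts as pinned when it looks like a
  /// git sha (40 hex characters) or matches one of the known release tags,
  /// in which case the freshness check above is skipped entirely.
  fn is_pinned(version: &str, known_tags: &[String]) -> bool {
      let looks_like_sha =
          version.len() == 40 && version.chars().all(|c| c.is_ascii_hexdigit());
      looks_like_sha || known_tags.iter().any(|tag| tag == version)
  }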
KtorZ
2023-09-08 14:48:50 +02:00
committed by Lucas
parent 3c3a7f2423
commit 87087a1811
3 changed files with 76 additions and 36 deletions


@@ -28,31 +28,40 @@ impl<'a> Downloader<'a> {
         }
     }
 
-    pub async fn download_packages<T>(
+    pub async fn download_packages<I, T>(
         &self,
-        packages: T,
+        event_listener: &T,
+        packages: I,
         project_name: &PackageName,
-    ) -> Result<(), Error>
+    ) -> Result<Vec<(PackageName, bool)>, Error>
     where
-        T: Iterator<Item = &'a Package>,
+        T: EventListener,
+        I: Iterator<Item = &'a Package>,
     {
-        let tasks = packages
-            .filter(|package| project_name != &package.name)
-            .map(|package| self.ensure_package_in_build_directory(package));
-        let _results = future::try_join_all(tasks).await?;
-        Ok(())
+        future::try_join_all(
+            packages
+                .filter(|package| project_name != &package.name)
+                .map(|package| self.ensure_package_in_build_directory(event_listener, package)),
+        )
+        .await
     }
 
-    pub async fn ensure_package_in_build_directory(
+    pub async fn ensure_package_in_build_directory<T>(
         &self,
+        event_listener: &T,
         package: &Package,
-    ) -> Result<bool, Error> {
-        let cache_key = paths::CacheKey::new(&self.http, package).await?;
-        self.ensure_package_downloaded(package, &cache_key).await?;
-        self.extract_package_from_cache(&package.name, &cache_key)
+    ) -> Result<(PackageName, bool), Error>
+    where
+        T: EventListener,
+    {
+        let cache_key = paths::CacheKey::new(&self.http, event_listener, package).await?;
+        let downloaded = self
+            .ensure_package_downloaded(package, &cache_key)
+            .await
+            .map(|downloaded| (package.name.clone(), downloaded))?;
+        self.extract_package_from_cache(&package.name, &cache_key)
+            .await?;
+        Ok(downloaded)
     }
 
     pub async fn ensure_package_downloaded(
@@ -101,14 +110,9 @@ impl<'a> Downloader<'a> {
         &self,
         name: &PackageName,
         cache_key: &CacheKey,
-    ) -> Result<bool, Error> {
+    ) -> Result<(), Error> {
         let destination = self.root_path.join(paths::build_deps_package(name));
 
-        // If the directory already exists then there's nothing for us to do
-        if destination.is_dir() {
-            return Ok(false);
-        }
-
         tokio::fs::create_dir_all(&destination).await?;
 
         let zipball_path = self.root_path.join(paths::package_cache_zipball(cache_key));
@@ -133,7 +137,7 @@ impl<'a> Downloader<'a> {
         result?;
 
-        Ok(true)
+        Ok(())
     }
 }
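
For context, a call site of the new `download_packages` signature might look
roughly like the sketch below; the `downloader`, `event_listener`, `packages`
and `project_name` bindings are assumed to exist and are not part of this
commit.

    let statuses = downloader
        .download_packages(&event_listener, packages.iter(), &project_name)
        .await?;

    // Each entry pairs a package name with whether it had to be
    // (re)downloaded into the local cache (true) or was already
    // up to date (false).
    let refreshed: Vec<PackageName> = statuses
        .into_iter()
        .filter_map(|(name, downloaded)| downloaded.then_some(name))
        .collect();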