Merge pull request #2 from auxolotl/stdenv

isabel roses 2024-05-13 17:11:45 +00:00 committed by GitHub
commit 4d829429ca
2437 changed files with 303988 additions and 12 deletions

.gitignore vendored

@@ -1 +1,2 @@
-/result
+result
+result-*

default.nix Normal file

@@ -0,0 +1,28 @@
let requiredVersion = import ./lib/minver.nix; in
if ! builtins ? nixVersion || builtins.compareVersions requiredVersion builtins.nixVersion == 1 then
abort ''
This version of Nixpkgs requires Nix >= ${requiredVersion}, please upgrade:
- If you are running NixOS, `nixos-rebuild' can be used to upgrade your system.
- Alternatively, with Nix > 2.0 `nix upgrade-nix' can be used to imperatively
upgrade Nix. You may use `nix-env --version' to check which version you have.
- If you installed Nix using the install script (https://nixos.org/nix/install),
it is safe to upgrade by running it again:
curl -L https://nixos.org/nix/install | sh
For more information, please see the NixOS release notes at
https://nixos.org/nixos/manual or locally at
${toString ./nixos/doc/manual/release-notes}.
If you need further help, see https://nixos.org/nixos/support.html
''
else
import ./pkgs/top-level/impure.nix
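
For context, the guard above hinges on `builtins.compareVersions`, which returns -1, 0, or 1. A minimal sketch of the same check, with a literal stand-in for `./lib/minver.nix`:

```nix
let
  requiredVersion = "2.3"; # stand-in for import ./lib/minver.nix
in
# compareVersions a b == 1 means a > b, i.e. the running Nix is too old
builtins.compareVersions requiredVersion builtins.nixVersion == 1
```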

flake.lock

@@ -2,15 +2,15 @@
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1714253743,
"narHash": "sha256-mdTQw2XlariysyScCv2tTE45QSU9v/ezLcHJ22f0Nxc=",
"owner": "auxolotl",
"lastModified": 1715447595,
"narHash": "sha256-VsVAUQOj/cS1LCOmMjAGeRksXIAdPnFIjCQ0XLkCsT0=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "58a1abdbae3217ca6b702f03d3b35125d88a2994",
"rev": "062ca2a9370a27a35c524dc82d540e6e9824b652",
"type": "github"
},
"original": {
"owner": "auxolotl",
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"

flake.nix

@@ -1,6 +1,6 @@
{
inputs = {
nixpkgs.url = "github:auxolotl/nixpkgs/nixos-unstable";
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
};
outputs =
@@ -13,14 +13,39 @@
nixPackages = forAllSystems (system: nixpkgs.legacyPackages.${system});
+# auxPackages = forAllSystems (system:
+# (import ./. { inherit system; })
+# );
+auxPackages = forAllSystems (system:
+(
+let requiredVersion = import ./lib/minver.nix; in
+if ! builtins ? nixVersion || builtins.compareVersions requiredVersion builtins.nixVersion == 1 then
+abort ''
+This version of Nixpkgs requires Nix >= ${requiredVersion}, please upgrade:
+- If you are running NixOS, `nixos-rebuild' can be used to upgrade your system.
+- Alternatively, with Nix > 2.0 `nix upgrade-nix' can be used to imperatively
+upgrade Nix. You may use `nix-env --version' to check which version you have.
+- If you installed Nix using the install script (https://nixos.org/nix/install),
+it is safe to upgrade by running it again:
+curl -L https://nixos.org/nix/install | sh
+For more information, please see the NixOS release notes at
+https://nixos.org/nixos/manual or locally at
+${toString ./nixos/doc/manual/release-notes}.
+If you need further help, see https://nixos.org/nixos/support.html
+''
+else
+import ./pkgs/top-level/default.nix { localSystem = system; }
+)
+);
# To test, run nix build .#tests.x86_64-linux.release
tests = forAllSystems (system: {
systems = import ./lib/tests/systems.nix;
-release = import ./lib/tests/release.nix { pkgs = self.nixPackages.${system}; };
+release = import ./lib/tests/release.nix { pkgs = self.auxPackages.${system}; };
});
};
}
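
As a usage sketch, the `auxPackages` output defined above can be consumed like any per-system package set; the flake URL and the `hello` attribute below are hypothetical:

```nix
{
  inputs.aux.url = "github:auxolotl/core"; # hypothetical input URL
  outputs = { self, aux }: {
    packages.x86_64-linux.default = aux.auxPackages.x86_64-linux.hello; # hypothetical attribute
  };
}
```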

lib/tests/release.nix

@@ -2,7 +2,7 @@
# Don't test properties of pkgs.lib, but rather the lib in the parent directory
pkgs ? import /etc/nix/inputs/nixpkgs {} // { lib = throw "pkgs.lib accessed, but the lib tests should use nixpkgs' lib path directly!"; },
nix ? pkgs-nixVersions.stable,
-nixVersions ? [ pkgs-nixVersions.minimum nix pkgs-nixVersions.unstable ],
+nixVersions ? [ pkgs-nixVersions.minimum nix pkgs-nixVersions.latest ],
pkgs-nixVersions ? import ./nix-for-tests.nix { inherit pkgs; },
}:

@@ -17,6 +17,7 @@
pkgs.runCommand "nixpkgs-lib-tests-nix-${nix.version}" {
buildInputs = [
(import ./check-eval.nix)
+# FIXME: reimplement maintainers and teams
# (import ./maintainers.nix {
# inherit pkgs;
# lib = import ../.;

maintainers/maintainer-list.nix Normal file

@@ -0,0 +1,68 @@
/* List of NixOS maintainers.
```nix
handle = {
# Required
name = "Your name";
# Optional, but at least one of email, matrix or githubId must be given
email = "address@example.org";
matrix = "@user:example.org";
github = "GithubUsername";
githubId = your-github-id;
keys = [{
fingerprint = "AAAA BBBB CCCC DDDD EEEE FFFF 0000 1111 2222 3333";
}];
};
```
where
- `handle` is the handle you are going to use in nixpkgs expressions,
- `name` is a name that people would know and recognize you by,
- `email` is your maintainer email address,
- `matrix` is your Matrix user ID,
- `github` is your GitHub handle (as it appears in the URL of your profile page, `https://github.com/<userhandle>`),
- `githubId` is your GitHub user ID, which can be found at `https://api.github.com/users/<userhandle>`,
- `keys` is a list of your PGP/GPG key fingerprints.
Specifying a GitHub account ensures that you automatically:
- get invited to the @NixOS/nixpkgs-maintainers team;
- once you are part of the @NixOS org, OfBorg will request you review
pull requests that modify a package for which you are a maintainer.
`handle == github` is strongly preferred whenever `github` is an acceptable attribute name and is short and convenient.
If `github` begins with a numeral, `handle` should be prefixed with an underscore.
```nix
_1example = {
github = "1example";
};
```
Add PGP/GPG keys only if you actually use them to sign commits and/or mail.
To get the required PGP/GPG values for a key run
```shell
gpg --fingerprint <email> | head -n 2
```
!!! Note that PGP/GPG values stored here are for informational purposes only, don't use this file as a source of truth.
More fields may be added in the future, however, in order to comply with GDPR this file should stay as minimal as possible.
When editing this file:
* keep the list alphabetically sorted, check with:
nix-instantiate --eval maintainers/scripts/check-maintainers-sorted.nix
* test the validity of the format with:
nix-build lib/tests/maintainers.nix
See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data.
When adding a new maintainer, be aware of the current commit conventions
documented at [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#commit-conventions)
file located in the root of the Nixpkgs repo.
*/
{
}
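
For illustration, a complete entry following the format documented above might look like this (all values hypothetical):

```nix
exampleuser = {
  name = "Example User";
  email = "example@example.org";
  github = "exampleuser";
  githubId = 1234567;
  keys = [{ fingerprint = "AAAA BBBB CCCC DDDD EEEE  FFFF 0000 1111 2222 3333"; }];
};
```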

maintainers/team-list.nix Normal file

@@ -0,0 +1,48 @@
/* List of maintainer teams.
name = {
# Required
members = [ maintainer1 maintainer2 ];
scope = "Maintain foo packages.";
shortName = "foo";
# Optional
enableFeatureFreezePing = true;
githubTeams = [ "my-subsystem" ];
};
where
- `members` is the list of maintainers belonging to the group,
- `scope` describes the scope of the group.
- `shortName` short human-readable name
- `enableFeatureFreezePing` will ping this team during the Feature Freeze announcements on releases
- There is limited mention capacity in a single post, so this should be reserved for critical components
or larger ecosystems within nixpkgs.
- `githubTeams` will ping specified GitHub teams as well
More fields may be added in the future.
When editing this file:
* keep the list alphabetically sorted
* test the validity of the format with:
nix-build lib/tests/teams.nix
*/
{ lib }:
with lib.maintainers; {
llvm = {
members = [];
scope = "Maintain LLVM package sets and related packages";
shortName = "LLVM";
enableFeatureFreezePing = true;
};
lix = {
members = [];
};
python = {
members = [];
};
rust = {
members = [];
};
}
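
A sketch of what a populated team would look like once maintainers are reintroduced; the `exampleuser` handle is hypothetical and would come from `lib.maintainers`:

```nix
python = {
  members = [ exampleuser ];
  scope = "Maintain the Python interpreter and core packages.";
  shortName = "Python";
};
```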

@@ -0,0 +1,14 @@
{ lib, stdenv }:
stdenv.mkDerivation {
name = "add-driver-runpath";
# Named "opengl-driver" for legacy reasons, but it is the path to
# hardware drivers installed by NixOS
driverLink = "/run/opengl-driver" + lib.optionalString stdenv.isi686 "-32";
buildCommand = ''
mkdir -p $out/nix-support
substituteAll ${./setup-hook.sh} $out/nix-support/setup-hook
'';
}

@@ -0,0 +1,29 @@
# Set RUNPATH so that driver libraries in /run/opengl-driver(-32)/lib can be found.
# This is needed so we do not rely on LD_LIBRARY_PATH, which does not work with setuid
# executables. Fixes https://github.com/NixOS/nixpkgs/issues/22760. It must be run
# in postFixup because RUNPATH stripping in fixup would undo it. Note that patchelf
# actually sets RUNPATH, not RPATH, and RUNPATH applies only to dependencies of the
# binary it is set on (including for dlopen), so the RUNPATH must indeed be set on
# these libraries and would not work if set only on executables.
addDriverRunpath() {
local forceRpath=
while [ $# -gt 0 ]; do
case "$1" in
--) shift; break;;
--force-rpath) shift; forceRpath=1;;
--*)
echo "addDriverRunpath: ERROR: Invalid command line" \
"argument: $1" >&2
return 1;;
*) break;;
esac
done
for file in "$@"; do
if ! isELF "$file"; then continue; fi
local origRpath="$(patchelf --print-rpath "$file")"
patchelf --set-rpath "@driverLink@/lib:$origRpath" ${forceRpath:+--force-rpath} "$file"
done
}
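
A usage sketch for this hook in a downstream package (the package and binary names are hypothetical):

```nix
{ stdenv, addDriverRunpath }:

stdenv.mkDerivation {
  pname = "example-gl-app"; # hypothetical
  version = "1.0";
  # src = ...;
  nativeBuildInputs = [ addDriverRunpath ];
  # run in postFixup so the fixup phase's RUNPATH stripping cannot undo it
  postFixup = ''
    addDriverRunpath $out/bin/example-gl-app
  '';
}
```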

@@ -0,0 +1,12 @@
{ lib, stdenv }:
stdenv.mkDerivation {
name = "add-opengl-runpath";
driverLink = "/run/opengl-driver" + lib.optionalString stdenv.isi686 "-32";
buildCommand = ''
mkdir -p $out/nix-support
substituteAll ${./setup-hook.sh} $out/nix-support/setup-hook
'';
}

@@ -0,0 +1,29 @@
# Set RUNPATH so that driver libraries in /run/opengl-driver(-32)/lib can be found.
# This is needed so we do not rely on LD_LIBRARY_PATH, which does not work with setuid
# executables. Fixes https://github.com/NixOS/nixpkgs/issues/22760. It must be run
# in postFixup because RUNPATH stripping in fixup would undo it. Note that patchelf
# actually sets RUNPATH, not RPATH, and RUNPATH applies only to dependencies of the
# binary it is set on (including for dlopen), so the RUNPATH must indeed be set on
# these libraries and would not work if set only on executables.
addOpenGLRunpath() {
local forceRpath=
while [ $# -gt 0 ]; do
case "$1" in
--) shift; break;;
--force-rpath) shift; forceRpath=1;;
--*)
echo "addOpenGLRunpath: ERROR: Invalid command line" \
"argument: $1" >&2
return 1;;
*) break;;
esac
done
for file in "$@"; do
if ! isELF "$file"; then continue; fi
local origRpath="$(patchelf --print-rpath "$file")"
patchelf --set-rpath "@driverLink@/lib:$origRpath" ${forceRpath:+--force-rpath} "$file"
done
}

@@ -0,0 +1,122 @@
# Builder for Agda packages.
{ stdenv, lib, self, Agda, runCommand, makeWrapper, writeText, ghcWithPackages, nixosTests }:
let
inherit (lib)
attrValues
elem
filter
filterAttrs
isAttrs
isList
platforms
;
inherit (lib.strings)
concatMapStrings
concatMapStringsSep
optionalString
;
withPackages' = {
pkgs,
ghc ? ghcWithPackages (p: with p; [ ieee754 ])
}: let
pkgs' = if isList pkgs then pkgs else pkgs self;
library-file = writeText "libraries" ''
${(concatMapStringsSep "\n" (p: "${p}/${p.libraryFile}") pkgs')}
'';
pname = "agdaWithPackages";
version = Agda.version;
in runCommand "${pname}-${version}" {
inherit pname version;
nativeBuildInputs = [ makeWrapper ];
passthru = {
unwrapped = Agda;
inherit withPackages;
tests = {
inherit (nixosTests) agda;
allPackages = withPackages (filter self.lib.isUnbrokenAgdaPackage (attrValues self));
};
};
# Agda is a split package with multiple outputs; do not inherit them here.
meta = removeAttrs Agda.meta [ "outputsToInstall" ];
} ''
mkdir -p $out/bin
makeWrapper ${Agda.bin}/bin/agda $out/bin/agda \
--add-flags "--with-compiler=${ghc}/bin/ghc" \
--add-flags "--library-file=${library-file}"
ln -s ${Agda.bin}/bin/agda-mode $out/bin/agda-mode
'';
withPackages = arg: if isAttrs arg then withPackages' arg else withPackages' { pkgs = arg; };
extensions = [
"agda"
"agda-lib"
"agdai"
"lagda"
"lagda.md"
"lagda.org"
"lagda.rst"
"lagda.tex"
"lagda.typ"
];
defaults =
{ pname
, meta
, buildInputs ? []
, everythingFile ? "./Everything.agda"
, includePaths ? []
, libraryName ? pname
, libraryFile ? "${libraryName}.agda-lib"
, buildPhase ? null
, installPhase ? null
, extraExtensions ? []
, ...
}: let
agdaWithArgs = withPackages (filter (p: p ? isAgdaDerivation) buildInputs);
includePathArgs = concatMapStrings (path: "-i" + path + " ") (includePaths ++ [(dirOf everythingFile)]);
in
{
inherit libraryName libraryFile;
isAgdaDerivation = true;
buildInputs = buildInputs ++ [ agdaWithArgs ];
buildPhase = if buildPhase != null then buildPhase else ''
runHook preBuild
agda ${includePathArgs} ${everythingFile}
rm ${everythingFile} ${lib.interfaceFile Agda.version everythingFile}
runHook postBuild
'';
installPhase = if installPhase != null then installPhase else ''
runHook preInstall
mkdir -p $out
find \( ${concatMapStringsSep " -or " (p: "-name '*.${p}'") (extensions ++ extraExtensions)} \) -exec cp -p --parents -t "$out" {} +
runHook postInstall
'';
# As documented at https://github.com/NixOS/nixpkgs/issues/172752,
# we need to set LC_ALL to a UTF-8-supporting locale. However, on
# darwin, it seems that there is no standard such locale; luckily,
# the referenced issue doesn't seem to surface on darwin. Hence let's
# set this only on non-darwin.
LC_ALL = optionalString (!stdenv.isDarwin) "C.UTF-8";
meta = if meta.broken or false then meta // { hydraPlatforms = platforms.none; } else meta;
# Retrieve all packages from the finished package set that have the current package as a dependency and build them
passthru.tests =
filterAttrs (name: pkg: self.lib.isUnbrokenAgdaPackage pkg && elem pname (map (pkg: pkg.pname) pkg.buildInputs)) self;
};
in
{
mkDerivation = args: stdenv.mkDerivation (args // defaults args);
inherit withPackages withPackages';
}
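
A usage sketch of the wrapper this builder produces, following the conventional nixpkgs pattern:

```nix
# An Agda that has the standard library registered in its library file:
agda.withPackages (p: [ p.standard-library ])
```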

@@ -0,0 +1,17 @@
{ lib }:
{
/* Returns the Agda interface file to a given Agda file.
*
* The resulting path may not be normalized.
*
* Examples:
* interfaceFile pkgs.agda.version "./Everything.agda" == "_build/2.6.4.3/agda/./Everything.agdai"
* interfaceFile pkgs.agda.version "src/Everything.lagda.tex" == "_build/2.6.4.3/agda/src/Everything.agdai"
*/
interfaceFile = agdaVersion: agdaFile: "_build/" + agdaVersion + "/agda/" + lib.head (builtins.match ''(.*\.)l?agda(\.(md|org|rst|tex|typ))?'' agdaFile) + "agdai";
/* Takes an arbitrary derivation and says whether it is an agda library package
* that is not marked as broken.
*/
isUnbrokenAgdaPackage = pkg: pkg.isAgdaDerivation or false && !pkg.meta.broken;
}

@@ -0,0 +1,143 @@
{ lib, stdenv
, lapack-reference, openblas
, isILP64 ? false
, blasProvider ? openblas }:
let
blasFortranSymbols = [
"caxpy" "ccopy" "cdotc" "cdotu" "cgbmv" "cgemm" "cgemv" "cgerc" "cgeru"
"chbmv" "chemm" "chemv" "cher" "cher2" "cher2k" "cherk" "chpmv" "chpr"
"chpr2" "crotg" "cscal" "csrot" "csscal" "cswap" "csymm" "csyr2k" "csyrk"
"ctbmv" "ctbsv" "ctpmv" "ctpsv" "ctrmm" "ctrmv" "ctrsm" "ctrsv" "dasum"
"daxpy" "dcabs1" "dcopy" "ddot" "dgbmv" "dgemm" "dgemv" "dger" "dnrm2"
"drot" "drotg" "drotm" "drotmg" "dsbmv" "dscal" "dsdot" "dspmv" "dspr"
"dspr2" "dswap" "dsymm" "dsymv" "dsyr" "dsyr2" "dsyr2k" "dsyrk" "dtbmv"
"dtbsv" "dtpmv" "dtpsv" "dtrmm" "dtrmv" "dtrsm" "dtrsv" "dzasum" "dznrm2"
"icamax" "idamax" "isamax" "izamax" "lsame" "sasum" "saxpy" "scabs1"
"scasum" "scnrm2" "scopy" "sdot" "sdsdot" "sgbmv" "sgemm" "sgemv"
"sger" "snrm2" "srot" "srotg" "srotm" "srotmg" "ssbmv" "sscal" "sspmv"
"sspr" "sspr2" "sswap" "ssymm" "ssymv" "ssyr" "ssyr2" "ssyr2k" "ssyrk"
"stbmv" "stbsv" "stpmv" "stpsv" "strmm" "strmv" "strsm" "strsv" "xerbla"
"xerbla_array" "zaxpy" "zcopy" "zdotc" "zdotu" "zdrot" "zdscal" "zgbmv"
"zgemm" "zgemv" "zgerc" "zgeru" "zhbmv" "zhemm" "zhemv" "zher" "zher2"
"zher2k" "zherk" "zhpmv" "zhpr" "zhpr2" "zrotg" "zscal" "zswap" "zsymm"
"zsyr2k" "zsyrk" "ztbmv" "ztbsv" "ztpmv" "ztpsv" "ztrmm" "ztrmv" "ztrsm"
"ztrsv"
];
version = "3";
canonicalExtension = if stdenv.hostPlatform.isLinux
then "${stdenv.hostPlatform.extensions.sharedLibrary}.${version}"
else stdenv.hostPlatform.extensions.sharedLibrary;
blasImplementation = lib.getName blasProvider;
blasProvider' = if blasImplementation == "mkl"
then blasProvider
else blasProvider.override { blas64 = isILP64; };
in
assert isILP64 -> blasImplementation == "mkl" || blasProvider'.blas64;
stdenv.mkDerivation {
pname = "blas";
inherit version;
outputs = [ "out" "dev" ];
meta = (blasProvider'.meta or {}) // {
description = "${lib.getName blasProvider} with just the BLAS C and FORTRAN ABI";
};
passthru = {
inherit isILP64;
provider = blasProvider';
implementation = blasImplementation;
};
dontBuild = true;
dontConfigure = true;
unpackPhase = "src=$PWD";
dontPatchELF = true;
installPhase = (''
mkdir -p $out/lib $dev/include $dev/lib/pkgconfig
libblas="${lib.getLib blasProvider'}/lib/libblas${canonicalExtension}"
if ! [ -e "$libblas" ]; then
echo "$libblas does not exist, ${blasProvider'.name} does not provide libblas."
exit 1
fi
$NM -an "$libblas" | cut -f3 -d' ' > symbols
for symbol in ${toString blasFortranSymbols}; do
grep -q "^''${symbol}_$" symbols || { echo "$symbol" was not found in "$libblas"; exit 1; }
done
cp -L "$libblas" $out/lib/libblas${canonicalExtension}
chmod +w $out/lib/libblas${canonicalExtension}
'' + (if stdenv.hostPlatform.isElf then ''
patchelf --set-soname libblas${canonicalExtension} $out/lib/libblas${canonicalExtension}
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/libblas${canonicalExtension}):${lib.getLib blasProvider'}/lib" $out/lib/libblas${canonicalExtension}
'' else lib.optionalString (stdenv.hostPlatform.isDarwin) ''
install_name_tool \
-id $out/lib/libblas${canonicalExtension} \
-add_rpath ${lib.getLib blasProvider'}/lib \
$out/lib/libblas${canonicalExtension}
'') + ''
if [ "$out/lib/libblas${canonicalExtension}" != "$out/lib/libblas${stdenv.hostPlatform.extensions.sharedLibrary}" ]; then
ln -s $out/lib/libblas${canonicalExtension} "$out/lib/libblas${stdenv.hostPlatform.extensions.sharedLibrary}"
fi
cat <<EOF > $dev/lib/pkgconfig/blas.pc
Name: blas
Version: ${version}
Description: BLAS FORTRAN implementation
Libs: -L$out/lib -lblas
Cflags: -I$dev/include
EOF
libcblas="${lib.getLib blasProvider'}/lib/libcblas${canonicalExtension}"
if ! [ -e "$libcblas" ]; then
echo "$libcblas does not exist, ${blasProvider'.name} does not provide libcblas."
exit 1
fi
cp -L "$libcblas" $out/lib/libcblas${canonicalExtension}
chmod +w $out/lib/libcblas${canonicalExtension}
'' + (if stdenv.hostPlatform.isElf then ''
patchelf --set-soname libcblas${canonicalExtension} $out/lib/libcblas${canonicalExtension}
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/libcblas${canonicalExtension}):${lib.getLib blasProvider'}/lib" $out/lib/libcblas${canonicalExtension}
'' else lib.optionalString stdenv.hostPlatform.isDarwin ''
install_name_tool \
-id $out/lib/libcblas${canonicalExtension} \
-add_rpath ${lib.getLib blasProvider'}/lib \
$out/lib/libcblas${canonicalExtension}
'') + ''
if [ "$out/lib/libcblas${canonicalExtension}" != "$out/lib/libcblas${stdenv.hostPlatform.extensions.sharedLibrary}" ]; then
ln -s $out/lib/libcblas${canonicalExtension} "$out/lib/libcblas${stdenv.hostPlatform.extensions.sharedLibrary}"
fi
cp ${lib.getDev lapack-reference}/include/cblas{,_mangling}.h $dev/include
cat <<EOF > $dev/lib/pkgconfig/cblas.pc
Name: cblas
Version: ${version}
Description: BLAS C implementation
Cflags: -I$dev/include
Libs: -L$out/lib -lcblas
EOF
'' + lib.optionalString (blasImplementation == "mkl") ''
mkdir -p $out/nix-support
echo 'export MKL_INTERFACE_LAYER=${lib.optionalString isILP64 "I"}LP64,GNU' > $out/nix-support/setup-hook
ln -s $out/lib/libblas${canonicalExtension} $out/lib/libmkl_rt${stdenv.hostPlatform.extensions.sharedLibrary}
ln -sf ${blasProvider'}/include/* $dev/include
'');
}
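
A usage sketch for switching the system-wide BLAS provider via an overlay (mirroring how nixpkgs documents BLAS/LAPACK switching):

```nix
final: prev: {
  blas = prev.blas.override { blasProvider = final.mkl; };
}
```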

@@ -0,0 +1,113 @@
{ lib, stdenv
, lapack-reference, openblas
, isILP64 ? false
, lapackProvider ? openblas }:
let
version = "3";
canonicalExtension = if stdenv.hostPlatform.isLinux
then "${stdenv.hostPlatform.extensions.sharedLibrary}.${version}"
else stdenv.hostPlatform.extensions.sharedLibrary;
lapackImplementation = lib.getName lapackProvider;
lapackProvider' = if lapackImplementation == "mkl"
then lapackProvider
else lapackProvider.override { blas64 = isILP64; };
in
assert isILP64 -> lapackImplementation == "mkl" || lapackProvider'.blas64;
stdenv.mkDerivation {
pname = "lapack";
inherit version;
outputs = [ "out" "dev" ];
meta = (lapackProvider'.meta or {}) // {
description = "${lib.getName lapackProvider'} with just the LAPACK C and FORTRAN ABI";
};
passthru = {
inherit isILP64;
provider = lapackProvider';
implementation = lapackImplementation;
};
# TODO: drop this forced rebuild, as it was needed just once.
rebuild_salt = if stdenv.isDarwin && stdenv.isx86_64 then "J4AQ" else null;
dontBuild = true;
dontConfigure = true;
unpackPhase = "src=$PWD";
dontPatchELF = true;
installPhase = (''
mkdir -p $out/lib $dev/include $dev/lib/pkgconfig
liblapack="${lib.getLib lapackProvider'}/lib/liblapack${canonicalExtension}"
if ! [ -e "$liblapack" ]; then
echo "$liblapack does not exist, ${lapackProvider'.name} does not provide liblapack."
exit 1
fi
cp -L "$liblapack" $out/lib/liblapack${canonicalExtension}
chmod +w $out/lib/liblapack${canonicalExtension}
'' + (lib.optionalString stdenv.hostPlatform.isElf ''
patchelf --set-soname liblapack${canonicalExtension} $out/lib/liblapack${canonicalExtension}
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/liblapack${canonicalExtension}):${lapackProvider'}/lib" $out/lib/liblapack${canonicalExtension}
'') + ''
if [ "$out/lib/liblapack${canonicalExtension}" != "$out/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary}" ]; then
ln -s $out/lib/liblapack${canonicalExtension} "$out/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary}"
fi
install -D ${lib.getDev lapack-reference}/include/lapack.h $dev/include/lapack.h
cat <<EOF > $dev/lib/pkgconfig/lapack.pc
Name: lapack
Version: ${version}
Description: LAPACK FORTRAN implementation
Cflags: -I$dev/include
Libs: -L$out/lib -llapack
EOF
liblapacke="${lib.getLib lapackProvider'}/lib/liblapacke${canonicalExtension}"
if ! [ -e "$liblapacke" ]; then
echo "$liblapacke does not exist, ${lapackProvider'.name} does not provide liblapacke."
exit 1
fi
cp -L "$liblapacke" $out/lib/liblapacke${canonicalExtension}
chmod +w $out/lib/liblapacke${canonicalExtension}
'' + (lib.optionalString stdenv.hostPlatform.isElf ''
patchelf --set-soname liblapacke${canonicalExtension} $out/lib/liblapacke${canonicalExtension}
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/liblapacke${canonicalExtension}):${lib.getLib lapackProvider'}/lib" $out/lib/liblapacke${canonicalExtension}
'') + ''
if [ -f "$out/lib/liblapacke.so.3" ]; then
ln -s $out/lib/liblapacke.so.3 $out/lib/liblapacke.so
fi
cp ${lib.getDev lapack-reference}/include/lapacke{,_mangling,_config,_utils}.h $dev/include
cat <<EOF > $dev/lib/pkgconfig/lapacke.pc
Name: lapacke
Version: ${version}
Description: LAPACK C implementation
Cflags: -I$dev/include
Libs: -L$out/lib -llapacke
EOF
'' + lib.optionalString (lapackImplementation == "mkl") ''
mkdir -p $out/nix-support
echo 'export MKL_INTERFACE_LAYER=${lib.optionalString isILP64 "I"}LP64,GNU' > $out/nix-support/setup-hook
ln -s $out/lib/liblapack${canonicalExtension} $out/lib/libmkl_rt${stdenv.hostPlatform.extensions.sharedLibrary}
ln -sf ${lapackProvider'}/include/* $dev/include
'');
}
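
The LAPACK counterpart admits the same override; `isILP64` is shown only as an assumption-level example of the 64-bit indexing knob, typically kept in sync with the BLAS setting:

```nix
final: prev: {
  lapack = prev.lapack.override {
    lapackProvider = final.mkl;
    isILP64 = true; # keep consistent with the blas override
  };
}
```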

@@ -0,0 +1,145 @@
#!@shell@
# shellcheck shell=bash
if [ -n "$DEBUG" ] ; then
set -x
fi
PATH="@path@:$PATH"
apprun_opt=true
OWD=$(readlink -f .)
# can be read by appimages: https://docs.appimage.org/packaging-guide/environment-variables.html
export OWD
# src : AppImage
# dest : let's unpack() create the directory
unpack() {
local src="$1"
local out="$2"
# https://github.com/AppImage/libappimage/blob/ca8d4b53bed5cbc0f3d0398e30806e0d3adeaaab/src/libappimage/utils/MagicBytesChecker.cpp#L45-L63
local appimageSignature;
appimageSignature="$(LC_ALL=C readelf -h "$src" | awk 'NR==2{print $10$11;}')"
local appimageType;
appimageType="$(LC_ALL=C readelf -h "$src" | awk 'NR==2{print $12;}')"
# check AppImage signature
if [ "$appimageSignature" != "4149" ]; then
echo "Not an AppImage file"
exit
fi
case "$appimageType" in
"01")
echo "Uncompress $(basename "$src") of type $appimageType"
mkdir "$out"
pv "$src" | bsdtar -x -C "$out" -f -
;;
"02")
# This method avoids issues with non-executable AppImages,
# non-native packers, packer patching, and the squashfs-root destination prefix.
# multiarch offset one-liner using same method as AppImage
# see https://gist.github.com/probonopd/a490ba3401b5ef7b881d5e603fa20c93
offset=$(LC_ALL=C readelf -h "$src" | awk 'NR==13{e_shoff=$5} NR==18{e_shentsize=$5} NR==19{e_shnum=$5} END{print e_shoff+e_shentsize*e_shnum}')
echo "Uncompress $(basename "$src") of type $appimageType @ offset $offset"
unsquashfs -q -d "$out" -o "$offset" "$src"
chmod go-w "$out"
;;
# "03")
# get ready, https://github.com/TheAssassin/type3-runtime
*)
echo Unsupported AppImage Type: "$appimageType"
exit
;;
esac
echo "$(basename "$src") is now installed in $out"
}
apprun() {
SHA256=$(sha256sum "$APPIMAGE" | awk '{print $1}')
export APPDIR="${XDG_CACHE_HOME:-$HOME/.cache}/appimage-run/$SHA256"
#compatibility
if [ -x "$APPDIR/squashfs-root" ]; then APPDIR="$APPDIR/squashfs-root"; fi
if [ ! -x "$APPDIR" ]; then
mkdir -p "$(dirname "$APPDIR")"
unpack "$APPIMAGE" "$APPDIR"
else echo "$(basename "$APPIMAGE")" installed in "$APPDIR"
fi
export PATH="$PATH:$PWD/usr/bin"
}
wrap() {
# much the same as in appimageTools
export APPIMAGE_SILENT_INSTALL=1
if [ -n "$APPIMAGE_DEBUG_EXEC" ]; then
cd "$APPDIR" || true
exec "$APPIMAGE_DEBUG_EXEC"
fi
exec "$APPDIR/AppRun" "$@"
}
usage() {
cat <<EOF
Usage: appimage-run [appimage-run options] <AppImage> [AppImage options]
-h show this message
-d debug mode
-x <directory> : extract appimage in the directory then exit.
-w <directory> : run uncompressed appimage directory (used in appimageTools)
[AppImage options]: Options are passed on to the appimage.
If you want to execute a custom command in the appimage's environment, set the APPIMAGE_DEBUG_EXEC environment variable.
EOF
exit 1
}
while getopts "x:w:dh" option; do
case "${option}" in
d) set -x
;;
x) # eXtract
unpack_opt=true
APPDIR=${OPTARG}
;;
w) # WrapAppImage
export APPDIR=${OPTARG}
wrap_opt=true
;;
h) usage
;;
*) usage
;;
esac
done
shift "$((OPTIND-1))"
if [ -n "$wrap_opt" ] && [ -d "$APPDIR" ]; then
wrap "$@"
exit
else
APPIMAGE="$(realpath "$1")" || usage
shift
fi
if [ -n "$unpack_opt" ] && [ -f "$APPIMAGE" ]; then
unpack "$APPIMAGE" "$APPDIR"
exit
fi
if [ -n "$apprun_opt" ] && [ -f "$APPIMAGE" ]; then
apprun
wrap "$@"
exit
fi

@@ -0,0 +1,210 @@
{ lib
, bash
, binutils-unwrapped
, coreutils
, gawk
, libarchive
, pv
, squashfsTools
, buildFHSEnv
, pkgs
}:
rec {
appimage-exec = pkgs.substituteAll {
src = ./appimage-exec.sh;
isExecutable = true;
dir = "bin";
path = lib.makeBinPath [
bash
binutils-unwrapped
coreutils
gawk
libarchive
pv
squashfsTools
];
};
extract = args@{ name ? "${args.pname}-${args.version}", postExtract ? "", src, ... }: pkgs.runCommand "${name}-extracted" {
buildInputs = [ appimage-exec ];
} ''
appimage-exec.sh -x $out ${src}
${postExtract}
'';
# for compatibility, deprecated
extractType1 = extract;
extractType2 = extract;
wrapType1 = wrapType2;
wrapAppImage = args@{
src,
extraPkgs,
meta ? {},
...
}: buildFHSEnv
(defaultFhsEnvArgs // {
targetPkgs = pkgs: [ appimage-exec ]
++ defaultFhsEnvArgs.targetPkgs pkgs ++ extraPkgs pkgs;
runScript = "appimage-exec.sh -w ${src} --";
meta = {
sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
} // meta;
} // (removeAttrs args (builtins.attrNames (builtins.functionArgs wrapAppImage))));
wrapType2 = args@{ src, extraPkgs ? pkgs: [ ], ... }: wrapAppImage
(args // {
inherit extraPkgs;
src = extract (lib.filterAttrs (key: value: builtins.elem key [ "name" "pname" "version" "src" ]) args);
# passthru src to make nix-update work
# hack to keep the origin position (unsafeGetAttrPos)
passthru = lib.pipe args [
lib.attrNames
(lib.remove "src")
(removeAttrs args)
] // args.passthru or { };
});
defaultFhsEnvArgs = {
# Most of the packages were taken from the Steam chroot
targetPkgs = pkgs: with pkgs; [
gtk3
bashInteractive
gnome.zenity
xorg.xrandr
which
perl
xdg-utils
iana-etc
krb5
gsettings-desktop-schemas
hicolor-icon-theme # don't show a GTK warning about hicolor not being installed
];
# list of libraries expected in an appimage environment:
# https://github.com/AppImage/pkg2appimage/blob/master/excludelist
multiPkgs = pkgs: with pkgs; [
desktop-file-utils
xorg.libXcomposite
xorg.libXtst
xorg.libXrandr
xorg.libXext
xorg.libX11
xorg.libXfixes
libGL
gst_all_1.gstreamer
gst_all_1.gst-plugins-ugly
gst_all_1.gst-plugins-base
libdrm
xorg.xkeyboardconfig
xorg.libpciaccess
glib
gtk2
bzip2
zlib
gdk-pixbuf
xorg.libXinerama
xorg.libXdamage
xorg.libXcursor
xorg.libXrender
xorg.libXScrnSaver
xorg.libXxf86vm
xorg.libXi
xorg.libSM
xorg.libICE
freetype
curlWithGnuTls
nspr
nss
fontconfig
cairo
pango
expat
dbus
cups
libcap
SDL2
libusb1
udev
dbus-glib
atk
at-spi2-atk
libudev0-shim
xorg.libXt
xorg.libXmu
xorg.libxcb
xorg.xcbutil
xorg.xcbutilwm
xorg.xcbutilimage
xorg.xcbutilkeysyms
xorg.xcbutilrenderutil
libGLU
libuuid
libogg
libvorbis
SDL
SDL2_image
glew110
openssl
libidn
tbb
wayland
mesa
libxkbcommon
vulkan-loader
flac
freeglut
libjpeg
libpng12
libpulseaudio
libsamplerate
libmikmod
libthai
libtheora
libtiff
pixman
speex
SDL_image
SDL_ttf
SDL_mixer
SDL2_ttf
SDL2_mixer
libappindicator-gtk2
libcaca
libcanberra
libgcrypt
libvpx
librsvg
xorg.libXft
libvdpau
alsa-lib
harfbuzz
e2fsprogs
libgpg-error
keyutils.lib
libjack2
fribidi
p11-kit
gmp
# libraries not on the upstream include list, but nevertheless expected
# by at least one appimage
libtool.lib # for Synfigstudio
xorg.libxshmfence # for apple-music-electron
at-spi2-core
pciutils # for FreeCAD
pipewire # immersed-vr wayland support
];
};
}
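
A usage sketch of the resulting `wrapType2` helper (URL is a placeholder; `lib.fakeHash` would be replaced after the first build):

```nix
appimageTools.wrapType2 {
  pname = "example-app"; # hypothetical
  version = "1.0.0";
  src = fetchurl {
    url = "https://example.org/example-app.AppImage"; # placeholder
    hash = lib.fakeHash; # replace with the real hash after the first build
  };
  extraPkgs = pkgs: [ pkgs.libsecret ]; # extra runtime libraries, if needed
}
```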

@@ -0,0 +1,35 @@
{ lib, stdenv, coreutils, jq, python3, nix, xz }:
# This function is for creating a flat-file binary cache, i.e. the kind created by
# nix copy --to file:///some/path and usable as a substituter (with the file:// prefix).
# For example, in the Nixpkgs repo:
# nix-build -E 'with import ./. {}; mkBinaryCache { rootPaths = [hello]; }'
{ name ? "binary-cache"
, rootPaths
}:
stdenv.mkDerivation {
inherit name;
__structuredAttrs = true;
exportReferencesGraph.closure = rootPaths;
preferLocalBuild = true;
nativeBuildInputs = [ coreutils jq python3 nix xz ];
buildCommand = ''
mkdir -p $out/nar
python ${./make-binary-cache.py}
# These directories must exist, or Nix might try to create them in LocalBinaryCacheStore::init(),
# which fails if mounted read-only
mkdir $out/realisations
mkdir $out/debuginfo
mkdir $out/log
'';
}

@@ -0,0 +1,43 @@
import json
import os
import subprocess
with open(os.environ["NIX_ATTRS_JSON_FILE"], "r") as f:
closures = json.load(f)["closure"]
os.chdir(os.environ["out"])
nixPrefix = os.environ["NIX_STORE"] # Usually /nix/store
with open("nix-cache-info", "w") as f:
f.write("StoreDir: " + nixPrefix + "\n")
def dropPrefix(path):
return path[len(nixPrefix + "/"):]
for item in closures:
narInfoHash = dropPrefix(item["path"]).split("-")[0]
xzFile = "nar/" + narInfoHash + ".nar.xz"
with open(xzFile, "w") as f:
subprocess.run("nix-store --dump %s | xz -c" % item["path"], stdout=f, shell=True)
fileHash = subprocess.run(["nix-hash", "--base32", "--type", "sha256", item["path"]], capture_output=True).stdout.decode().strip()
fileSize = os.path.getsize(xzFile)
# Rename the .nar.xz file to its own hash to match "nix copy" behavior
finalXzFile = "nar/" + fileHash + ".nar.xz"
os.rename(xzFile, finalXzFile)
with open(narInfoHash + ".narinfo", "w") as f:
f.writelines((x + "\n" for x in [
"StorePath: " + item["path"],
"URL: " + finalXzFile,
"Compression: xz",
"FileHash: sha256:" + fileHash,
"FileSize: " + str(fileSize),
"NarHash: " + item["narHash"],
"NarSize: " + str(item["narSize"]),
"References: " + " ".join(dropPrefix(ref) for ref in item["references"]),
]))

@@ -0,0 +1,81 @@
# Unconditionally adding in platform version flags will result in warnings that
# will be treated as errors by some packages. Add any missing flags here.
# There are two things to be configured: the "platform version" (oldest
# supported version of macos, ios, etc), and the "sdk version".
#
# The modern way of configuring these is to use:
# -platform_version $platform $platform_version $sdk_version"
#
# The old way is still supported, and uses flags like:
# -${platform}_version_min $platform_version
# -sdk_version $sdk_version
#
# If both styles are specified ld will combine them. If multiple versions are
# specified for the same platform, ld will emit an error.
#
# The following adds flags for whichever properties have not already been
# provided.
havePlatformVersionFlag=
haveDarwinSDKVersion=
haveDarwinPlatformVersion=
# Roles will be set by add-flags.sh, but add-flags.sh can be skipped when the
# cc-wrapper has added the linker flags. Both the cc-wrapper and the binutils
# wrapper mangle the same variable (MACOSX_DEPLOYMENT_TARGET), so if roles are
# empty due to being run through the cc-wrapper then the mangle here is a no-op
# and we still do the right thing.
#
# To be robust, make sure we always have the correct set of roles.
accumulateRoles
mangleVarSingle @darwinMinVersionVariable@ ${role_suffixes[@]+"${role_suffixes[@]}"}
n=0
nParams=${#params[@]}
while (( n < nParams )); do
p=${params[n]}
case "$p" in
# the current platform
-@darwinPlatform@_version_min)
haveDarwinPlatformVersion=1
;;
# legacy aliases
-macosx_version_min|-iphoneos_version_min|-iosmac_version_min|-uikitformac_version_min)
haveDarwinPlatformVersion=1
;;
-sdk_version)
haveDarwinSDKVersion=1
;;
-platform_version)
havePlatformVersionFlag=1
# If clang can't determine the sdk version it will pass 0.0.0. This
# has runtime effects so we override this to use the known sdk
# version.
if [ "${params[n+3]-}" = 0.0.0 ]; then
params[n+3]=@darwinSdkVersion@
fi
;;
esac
n=$((n + 1))
done
# If the caller has set -platform_version, trust they're doing the right thing.
# This will be the typical case for clang in nixpkgs.
if [ ! "$havePlatformVersionFlag" ]; then
if [ ! "$haveDarwinSDKVersion" ] && [ ! "$haveDarwinPlatformVersion" ]; then
# Nothing provided. Use the modern "-platform_version" to set both.
extraBefore+=(-platform_version @darwinPlatform@ "${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@}" @darwinSdkVersion@)
elif [ ! "$haveDarwinSDKVersion" ]; then
# Add missing sdk version
extraBefore+=(-sdk_version @darwinSdkVersion@)
elif [ ! "$haveDarwinPlatformVersion" ]; then
# Add missing platform version
extraBefore+=(-@darwinPlatform@_version_min "${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@}")
fi
fi

@@ -0,0 +1,37 @@
# See cc-wrapper for comments.
var_templates_list=(
NIX_IGNORE_LD_THROUGH_GCC
NIX_LDFLAGS
NIX_LDFLAGS_BEFORE
NIX_DYNAMIC_LINKER
NIX_LDFLAGS_AFTER
NIX_LDFLAGS_HARDEN
NIX_HARDENING_ENABLE
)
var_templates_bool=(
NIX_SET_BUILD_ID
NIX_DONT_SET_RPATH
)
accumulateRoles
for var in "${var_templates_list[@]}"; do
mangleVarList "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done
for var in "${var_templates_bool[@]}"; do
mangleVarBool "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done
if [ -e @out@/nix-support/libc-ldflags ]; then
NIX_LDFLAGS_@suffixSalt@+=" $(< @out@/nix-support/libc-ldflags)"
fi
if [ -z "$NIX_DYNAMIC_LINKER_@suffixSalt@" ] && [ -e @out@/nix-support/ld-set-dynamic-linker ]; then
NIX_DYNAMIC_LINKER_@suffixSalt@="$(< @out@/nix-support/dynamic-linker)"
fi
if [ -e @out@/nix-support/libc-ldflags-before ]; then
NIX_LDFLAGS_BEFORE_@suffixSalt@="$(< @out@/nix-support/libc-ldflags-before) $NIX_LDFLAGS_BEFORE_@suffixSalt@"
fi
export NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@=1

@@ -0,0 +1,62 @@
declare -a hardeningLDFlags=()
declare -A hardeningEnableMap=()
# Intentionally word-split in case 'NIX_HARDENING_ENABLE' is defined in Nix. The
# array expansion also prevents undefined variables from causing trouble with
# `set -u`.
for flag in ${NIX_HARDENING_ENABLE_@suffixSalt@-}; do
hardeningEnableMap["$flag"]=1
done
# Remove unsupported flags.
for flag in @hardening_unsupported_flags@; do
unset -v "hardeningEnableMap[$flag]"
done
if (( "${NIX_DEBUG:-0}" >= 1 )); then
declare -a allHardeningFlags=(pie relro bindnow)
declare -A hardeningDisableMap=()
# Determine which flags were effectively disabled so we can report below.
for flag in "${allHardeningFlags[@]}"; do
if [[ -z "${hardeningEnableMap[$flag]-}" ]]; then
hardeningDisableMap[$flag]=1
fi
done
printf 'HARDENING: disabled flags:' >&2
(( "${#hardeningDisableMap[@]}" )) && printf ' %q' "${!hardeningDisableMap[@]}" >&2
echo >&2
if (( "${#hardeningEnableMap[@]}" )); then
echo 'HARDENING: Is active (not completely disabled with "all" flag)' >&2;
fi
fi
for flag in "${!hardeningEnableMap[@]}"; do
case $flag in
pie)
if [[ ! (" ${params[*]} " =~ " -shared " \
|| " ${params[*]} " =~ " -static " \
|| " ${params[*]} " =~ " -r " \
|| " ${params[*]} " =~ " -Ur " \
|| " ${params[*]} " =~ " -i ") ]]; then
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling LDFlags -pie >&2; fi
hardeningLDFlags+=('-pie')
fi
;;
relro)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling relro >&2; fi
hardeningLDFlags+=('-z' 'relro')
;;
bindnow)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling bindnow >&2; fi
hardeningLDFlags+=('-z' 'now')
;;
*)
# Ignore unsupported. Checked in Nix that at least *some*
# tool supports each flag.
;;
esac
done
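
These hardening toggles surface to individual packages through the standard environment; a sketch of opting out per derivation (package name hypothetical):

```nix
stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  # src = ...;
  # ask the wrapper not to inject these linker hardening flags
  hardeningDisable = [ "pie" "relro" "bindnow" ];
}
```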

@@ -0,0 +1,49 @@
#! @shell@
# shellcheck shell=bash
set -eu -o pipefail +o posix
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
source @signingUtils@
extraAfter=()
extraBefore=()
params=("$@")
input=
pprev=
prev=
for p in \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
do
if [ "$pprev" != "-change" ] && [[ "$prev" != -* ]] && [[ "$p" != -* ]]; then
input="$p"
fi
pprev="$prev"
prev="$p"
done
# Optionally print debug info.
if (( "${NIX_DEBUG:-0}" >= 1 )); then
# Old bash workaround, see above.
echo "extra flags before to @prog@:" >&2
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
echo "original flags to @prog@:" >&2
printf " %q\n" ${params+"${params[@]}"} >&2
echo "extra flags after to @prog@:" >&2
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
fi
@prog@ \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
sign "$input"

@@ -0,0 +1,78 @@
#! @shell@
# shellcheck shell=bash
set -eu -o pipefail +o posix
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
source @signingUtils@
extraAfter=()
extraBefore=()
params=("$@")
output=
inputs=()
restAreFiles=
prev=
for p in \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
do
if [ "$restAreFiles" ]; then
inputs+=("$p")
else
case "$prev" in
-s|-R|-d|-arch)
# Unrelated arguments with values
;;
-o)
# Explicit output
output="$p"
;;
*)
# Any other argument either takes no value, or is a file.
if [[ "$p" != -* ]]; then
inputs+=("$p")
fi
;;
esac
if [ "$p" == - ]; then
restAreFiles=1
fi
fi
prev="$p"
done
# Optionally print debug info.
if (( "${NIX_DEBUG:-0}" >= 1 )); then
# Old bash workaround, see above.
echo "extra flags before to @prog@:" >&2
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
echo "original flags to @prog@:" >&2
printf " %q\n" ${params+"${params[@]}"} >&2
echo "extra flags after to @prog@:" >&2
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
fi
@prog@ \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
if [ "$output" ]; then
# Single explicit output
signIfRequired "$output"
else
# Multiple inputs, rewritten in place
for input in "${inputs[@]}"; do
signIfRequired "$input"
done
fi

@@ -0,0 +1,439 @@
# The Nixpkgs CC is not directly usable, since it doesn't know where
# the C library and standard header files are. Therefore the compiler
# produced by that package cannot be installed directly in a user
# environment and used from the command line. So we use a wrapper
# script that sets up the right environment variables so that the
# compiler and the linker just "work".
{ name ? ""
, lib
, stdenvNoCC
, runtimeShell
, bintools ? null, libc ? null, coreutils ? null, gnugrep ? null
, netbsd ? null, netbsdCross ? null
, sharedLibraryLoader ?
if libc == null then
null
else if stdenvNoCC.targetPlatform.isNetBSD then
if !(targetPackages ? netbsdCross) then
netbsd.ld_elf_so
else if libc != targetPackages.netbsdCross.headers then
targetPackages.netbsdCross.ld_elf_so
else
null
else
lib.getLib libc
, nativeTools, noLibc ? false, nativeLibc, nativePrefix ? ""
, propagateDoc ? bintools != null && bintools ? man
, extraPackages ? [], extraBuildCommands ? ""
, isGNU ? bintools.isGNU or false
, isLLVM ? bintools.isLLVM or false
, isCCTools ? bintools.isCCTools or false
, expand-response-params
, targetPackages ? {}
, useMacosReexportHack ? false
, wrapGas ? false
# Note: the hardening flags are part of the bintools-wrapper, rather than
# the cc-wrapper, because a few of them are handled by the linker.
, defaultHardeningFlags ? [
"bindnow"
"format"
"fortify"
"fortify3"
"pic"
"relro"
"stackprotector"
"strictoverflow"
] ++ lib.optional (with stdenvNoCC;
# Musl-based platforms will keep "pie", other platforms will not.
# If you change this, make sure to update section `{#sec-hardening-in-nixpkgs}`
# in the nixpkgs manual to inform users about the defaults.
targetPlatform.libc == "musl"
# Except when:
# - static aarch64, where compilation works, but produces segfaulting dynamically linked binaries.
# - static armv7l, where compilation fails.
&& !(targetPlatform.isAarch && targetPlatform.isStatic)
) "pie"
# Darwin code signing support utilities
, postLinkSignHook ? null, signingUtils ? null
}:
assert nativeTools -> !propagateDoc && nativePrefix != "";
assert !nativeTools -> bintools != null && coreutils != null && gnugrep != null;
assert !(nativeLibc && noLibc);
assert (noLibc || nativeLibc) == (libc == null);
let
inherit (lib)
attrByPath
concatStringsSep
getBin
getDev
getLib
getName
getVersion
hasSuffix
optional
optionalAttrs
optionals
optionalString
platforms
removePrefix
replaceStrings
;
inherit (stdenvNoCC) hostPlatform targetPlatform;
# Prefix for binaries. Customarily ends with a dash separator.
#
# TODO(@Ericson2314) Make unconditional, or optional but always true by
# default.
targetPrefix = optionalString (targetPlatform != hostPlatform)
(targetPlatform.config + "-");
bintoolsVersion = getVersion bintools;
bintoolsName = removePrefix targetPrefix (getName bintools);
libc_bin = optionalString (libc != null) (getBin libc);
libc_dev = optionalString (libc != null) (getDev libc);
libc_lib = optionalString (libc != null) (getLib libc);
bintools_bin = optionalString (!nativeTools) (getBin bintools);
# The wrapper scripts use 'cat' and 'grep', so we may need coreutils.
coreutils_bin = optionalString (!nativeTools) (getBin coreutils);
# See description in cc-wrapper.
suffixSalt = replaceStrings ["-" "."] ["_" "_"] targetPlatform.config;
# The dynamic linker has different names on different platforms. This is a
# shell glob that ought to match it.
dynamicLinker =
/**/ if sharedLibraryLoader == null then ""
else if targetPlatform.libc == "musl" then "${sharedLibraryLoader}/lib/ld-musl-*"
else if targetPlatform.libc == "uclibc" then "${sharedLibraryLoader}/lib/ld*-uClibc.so.1"
else if (targetPlatform.libc == "bionic" && targetPlatform.is32bit) then "/system/bin/linker"
else if (targetPlatform.libc == "bionic" && targetPlatform.is64bit) then "/system/bin/linker64"
else if targetPlatform.libc == "nblibc" then "${sharedLibraryLoader}/libexec/ld.elf_so"
else if targetPlatform.system == "i686-linux" then "${sharedLibraryLoader}/lib/ld-linux.so.2"
else if targetPlatform.system == "x86_64-linux" then "${sharedLibraryLoader}/lib/ld-linux-x86-64.so.2"
# ELFv1 (.1) or ELFv2 (.2) ABI
else if targetPlatform.isPower64 then "${sharedLibraryLoader}/lib/ld64.so.*"
# ARM with a wildcard, which can be "" or "-armhf".
else if (with targetPlatform; isAarch32 && isLinux) then "${sharedLibraryLoader}/lib/ld-linux*.so.3"
else if targetPlatform.system == "aarch64-linux" then "${sharedLibraryLoader}/lib/ld-linux-aarch64.so.1"
else if targetPlatform.system == "powerpc-linux" then "${sharedLibraryLoader}/lib/ld.so.1"
else if targetPlatform.isMips then "${sharedLibraryLoader}/lib/ld.so.1"
# `ld-linux-riscv{32,64}-<abi>.so.1`
else if targetPlatform.isRiscV then "${sharedLibraryLoader}/lib/ld-linux-riscv*.so.1"
else if targetPlatform.isLoongArch64 then "${sharedLibraryLoader}/lib/ld-linux-loongarch*.so.1"
else if targetPlatform.isDarwin then "/usr/lib/dyld"
else if targetPlatform.isFreeBSD then "/libexec/ld-elf.so.1"
else if hasSuffix "pc-gnu" targetPlatform.config then "ld.so.1"
else "";
in
stdenvNoCC.mkDerivation {
pname = targetPrefix
+ (if name != "" then name else "${bintoolsName}-wrapper");
version = optionalString (bintools != null) bintoolsVersion;
preferLocalBuild = true;
outputs = [ "out" ] ++ optionals propagateDoc ([ "man" ] ++ optional (bintools ? info) "info");
passthru = {
inherit targetPrefix suffixSalt;
inherit bintools libc nativeTools nativeLibc nativePrefix isGNU isLLVM;
emacsBufferSetup = pkgs: ''
; We should handle propagation here too
(mapc
(lambda (arg)
(when (file-directory-p (concat arg "/lib"))
(setenv "NIX_LDFLAGS_${suffixSalt}" (concat (getenv "NIX_LDFLAGS_${suffixSalt}") " -L" arg "/lib")))
(when (file-directory-p (concat arg "/lib64"))
(setenv "NIX_LDFLAGS_${suffixSalt}" (concat (getenv "NIX_LDFLAGS_${suffixSalt}") " -L" arg "/lib64"))))
'(${concatStringsSep " " (map (pkg: "\"${pkg}\"") pkgs)}))
'';
inherit defaultHardeningFlags;
};
dontBuild = true;
dontConfigure = true;
enableParallelBuilding = true;
unpackPhase = ''
src=$PWD
'';
installPhase =
''
mkdir -p $out/bin $out/nix-support
wrap() {
local dst="$1"
local wrapper="$2"
export prog="$3"
export use_response_file_by_default=${if isCCTools then "1" else "0"}
substituteAll "$wrapper" "$out/bin/$dst"
chmod +x "$out/bin/$dst"
}
''
+ (if nativeTools then ''
echo ${nativePrefix} > $out/nix-support/orig-bintools
ldPath="${nativePrefix}/bin"
'' else ''
echo $bintools_bin > $out/nix-support/orig-bintools
ldPath="${bintools_bin}/bin"
''
# Solaris needs an additional ld wrapper.
+ optionalString (targetPlatform.isSunOS && nativePrefix != "") ''
ldPath="${nativePrefix}/bin"
exec="$ldPath/${targetPrefix}ld"
wrap ld-solaris ${./ld-solaris-wrapper.sh}
'')
# If we are asked to wrap `gas` and this bintools has it,
# then symlink it (`as` will be symlinked next).
# This is mainly for the wrapped gnat-bootstrap on x86-64 Darwin,
# as it must have both the GNU assembler from cctools (installed as `gas`)
# and the Clang integrated assembler (installed as `as`).
# See pkgs/os-specific/darwin/binutils/default.nix for details.
+ optionalString wrapGas ''
if [ -e $ldPath/${targetPrefix}gas ]; then
ln -s $ldPath/${targetPrefix}gas $out/bin/${targetPrefix}gas
fi
''
# Create symlinks for rest of the binaries.
+ ''
for binary in objdump objcopy size strings as ar nm gprof dwp c++filt addr2line \
ranlib readelf elfedit dlltool dllwrap windmc windres; do
if [ -e $ldPath/${targetPrefix}''${binary} ]; then
ln -s $ldPath/${targetPrefix}''${binary} $out/bin/${targetPrefix}''${binary}
fi
done
'' + (if !useMacosReexportHack then ''
if [ -e ''${ld:-$ldPath/${targetPrefix}ld} ]; then
wrap ${targetPrefix}ld ${./ld-wrapper.sh} ''${ld:-$ldPath/${targetPrefix}ld}
fi
'' else ''
ldInner="${targetPrefix}ld-reexport-delegate"
wrap "$ldInner" ${./macos-sierra-reexport-hack.bash} ''${ld:-$ldPath/${targetPrefix}ld}
wrap "${targetPrefix}ld" ${./ld-wrapper.sh} "$out/bin/$ldInner"
unset ldInner
'') + ''
for variant in $ldPath/${targetPrefix}ld.*; do
basename=$(basename "$variant")
wrap $basename ${./ld-wrapper.sh} $variant
done
'';
strictDeps = true;
depsTargetTargetPropagated = extraPackages;
setupHooks = [
../setup-hooks/role.bash
./setup-hook.sh
];
postFixup =
##
## General libc support
##
optionalString (libc != null) (''
touch "$out/nix-support/libc-ldflags"
echo "-L${libc_lib}${libc.libdir or "/lib"}" >> $out/nix-support/libc-ldflags
echo "${libc_lib}" > $out/nix-support/orig-libc
echo "${libc_dev}" > $out/nix-support/orig-libc-dev
''
##
## Dynamic linker support
##
+ optionalString (sharedLibraryLoader != null) ''
if [[ -z ''${dynamicLinker+x} ]]; then
echo "Don't know the name of the dynamic linker for platform '${targetPlatform.config}', so guessing instead." >&2
local dynamicLinker="${sharedLibraryLoader}/lib/ld*.so.?"
fi
''
# Expand globs to fill array of options
+ ''
dynamicLinker=($dynamicLinker)
case ''${#dynamicLinker[@]} in
0) echo "No dynamic linker found for platform '${targetPlatform.config}'." >&2;;
1) echo "Using dynamic linker: '$dynamicLinker'" >&2;;
*) echo "Multiple dynamic linkers found for platform '${targetPlatform.config}'." >&2;;
esac
if [ -n "''${dynamicLinker-}" ]; then
echo $dynamicLinker > $out/nix-support/dynamic-linker
${if targetPlatform.isDarwin then ''
printf "export LD_DYLD_PATH=%q\n" "$dynamicLinker" >> $out/nix-support/setup-hook
'' else optionalString (sharedLibraryLoader != null) ''
if [ -e ${sharedLibraryLoader}/lib/32/ld-linux.so.2 ]; then
echo ${sharedLibraryLoader}/lib/32/ld-linux.so.2 > $out/nix-support/dynamic-linker-m32
fi
touch $out/nix-support/ld-set-dynamic-linker
''}
fi
'')
##
## User env support
##
# Propagate the underlying unwrapped bintools so that if you
# install the wrapper, you get tools like objdump (same for any
# binaries of libc).
+ optionalString (!nativeTools) ''
printWords ${bintools_bin} ${optionalString (libc != null) libc_bin} > $out/nix-support/propagated-user-env-packages
''
##
## Man page and info support
##
+ optionalString propagateDoc (''
ln -s ${bintools.man} $man
'' + optionalString (bintools ? info) ''
ln -s ${bintools.info} $info
'')
##
## Hardening support
##
# some linkers on some platforms don't support specific -z flags
+ ''
export hardening_unsupported_flags=""
if [[ "$($ldPath/${targetPrefix}ld -z now 2>&1 || true)" =~ un(recognized|known)\ option ]]; then
hardening_unsupported_flags+=" bindnow"
fi
if [[ "$($ldPath/${targetPrefix}ld -z relro 2>&1 || true)" =~ un(recognized|known)\ option ]]; then
hardening_unsupported_flags+=" relro"
fi
''
+ optionalString hostPlatform.isCygwin ''
hardening_unsupported_flags+=" pic"
''
+ optionalString (targetPlatform.isAvr || targetPlatform.isWindows) ''
hardening_unsupported_flags+=" relro bindnow"
''
+ optionalString (libc != null && targetPlatform.isAvr) ''
for isa in avr5 avr3 avr4 avr6 avr25 avr31 avr35 avr51 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7 tiny-stack; do
echo "-L${getLib libc}/avr/lib/$isa" >> $out/nix-support/libc-cflags
done
''
+ optionalString targetPlatform.isDarwin ''
echo "-arch ${targetPlatform.darwinArch}" >> $out/nix-support/libc-ldflags
''
##
## GNU specific extra strip flags
##
# TODO(@sternenseemann): make a generic strip wrapper?
+ optionalString (bintools.isGNU or false) ''
wrap ${targetPrefix}strip ${./gnu-binutils-strip-wrapper.sh} \
"${bintools_bin}/bin/${targetPrefix}strip"
''
###
### Remove certain timestamps from final binaries
###
+ optionalString (targetPlatform.isDarwin && !(bintools.isGNU or false)) ''
echo "export ZERO_AR_DATE=1" >> $out/nix-support/setup-hook
''
+ ''
for flags in "$out/nix-support"/*flags*; do
substituteInPlace "$flags" --replace $'\n' ' '
done
substituteAll ${./add-flags.sh} $out/nix-support/add-flags.sh
substituteAll ${./add-hardening.sh} $out/nix-support/add-hardening.sh
substituteAll ${../wrapper-common/utils.bash} $out/nix-support/utils.bash
''
###
### Ensure consistent LC_VERSION_MIN_MACOSX
###
+ optionalString targetPlatform.isDarwin (
let
inherit (targetPlatform)
darwinPlatform darwinSdkVersion
darwinMinVersion darwinMinVersionVariable;
in ''
export darwinPlatform=${darwinPlatform}
export darwinMinVersion=${darwinMinVersion}
export darwinSdkVersion=${darwinSdkVersion}
export darwinMinVersionVariable=${darwinMinVersionVariable}
substituteAll ${./add-darwin-ldflags-before.sh} $out/nix-support/add-local-ldflags-before.sh
''
)
##
## Code signing on Apple Silicon
##
+ optionalString (targetPlatform.isDarwin && targetPlatform.isAarch64) ''
echo 'source ${postLinkSignHook}' >> $out/nix-support/post-link-hook
export signingUtils=${signingUtils}
wrap \
${targetPrefix}install_name_tool \
${./darwin-install_name_tool-wrapper.sh} \
"${bintools_bin}/bin/${targetPrefix}install_name_tool"
wrap \
${targetPrefix}strip ${./darwin-strip-wrapper.sh} \
"${bintools_bin}/bin/${targetPrefix}strip"
''
##
## Extra custom steps
##
+ extraBuildCommands;
env = {
# for substitution in utils.bash
# TODO(@sternenseemann): invent something cleaner than passing in "" in case of absence
expandResponseParams = "${expand-response-params}/bin/expand-response-params";
# TODO(@sternenseemann): rename env var via stdenv rebuild
shell = (getBin runtimeShell + runtimeShell.shellPath or "");
gnugrep_bin = optionalString (!nativeTools) gnugrep;
wrapperName = "BINTOOLS_WRAPPER";
inherit dynamicLinker targetPrefix suffixSalt coreutils_bin;
inherit bintools_bin libc_bin libc_dev libc_lib;
default_hardening_flags_str = builtins.toString defaultHardeningFlags;
};
meta =
let bintools_ = optionalAttrs (bintools != null) bintools; in
(optionalAttrs (bintools_ ? meta) (removeAttrs bintools.meta ["priority"])) //
{ description =
attrByPath ["meta" "description"] "System binary utilities" bintools_
+ " (wrapper script)";
priority = 10;
} // optionalAttrs useMacosReexportHack {
platforms = platforms.darwin;
};
}
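
A usage sketch of instantiating this wrapper around a different toolchain, mirroring the `wrapBintoolsWith` helper in nixpkgs (treat the exact attribute names as assumptions):

```nix
# Wrap LLVM's unwrapped bintools instead of GNU binutils:
wrapBintoolsWith {
  bintools = llvmPackages.bintools-unwrapped;
}
```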

@@ -0,0 +1,4 @@
#! @shell@
# shellcheck shell=bash
exec @prog@ --enable-deterministic-archives "$@"

@@ -0,0 +1,29 @@
#!@shell@
set -eu -o pipefail
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
declare -a args=("$@")
# I've also tried adding -z direct and -z lazyload, but it gave too many problems with C++ exceptions :'(
# Also made sure libgcc would not be lazy-loaded, as suggested here: https://www.illumos.org/issues/2534#note-3
# but still no success.
declare -a argsBefore=(-z ignore) argsAfter=()
# This loop makes sure all -L arguments are before -l arguments, or ld may complain it cannot find a library.
# GNU binutils does not have this problem:
# http://stackoverflow.com/questions/5817269/does-the-order-of-l-and-l-options-in-the-gnu-linker-matter
while (( $# )); do
case "${args[$i]}" in
-L) argsBefore+=("$1" "$2"); shift ;;
-L?*) argsBefore+=("$1") ;;
*) argsAfter+=("$1") ;;
esac
shift
done
# Trace:
set -x
exec "@ld@" "${argsBefore[@]}" "${argsAfter[@]}"

@@ -0,0 +1,273 @@
#! @shell@
set -eu -o pipefail +o posix
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
path_backup="$PATH"
# phase separation makes this look useless
# shellcheck disable=SC2157
if [ -n "@coreutils_bin@" ]; then
PATH="@coreutils_bin@/bin"
fi
source @out@/nix-support/utils.bash
if [ -z "${NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
source @out@/nix-support/add-flags.sh
fi
# Optionally filter out paths not referring to the store.
expandResponseParams "$@"
# NIX_LINK_TYPE is set if ld has been called through our cc wrapper. We take
# advantage of this to avoid both recalculating it, and also repeating other
# processing cc wrapper has already done.
if [[ -n "${NIX_LINK_TYPE_@suffixSalt@:-}" ]]; then
linkType=$NIX_LINK_TYPE_@suffixSalt@
else
linkType=$(checkLinkType "${params[@]}")
fi
if [[ "${NIX_ENFORCE_PURITY:-}" = 1 && -n "${NIX_STORE:-}"
&& ( -z "$NIX_IGNORE_LD_THROUGH_GCC_@suffixSalt@" || -z "${NIX_LINK_TYPE_@suffixSalt@:-}" ) ]]; then
rest=()
nParams=${#params[@]}
declare -i n=0
while (( "$n" < "$nParams" )); do
p=${params[n]}
p2=${params[n+1]:-} # handle `p` being last one
if [ "${p:0:3}" = -L/ ] && badPath "${p:2}"; then
skip "${p:2}"
elif [ "$p" = -L ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "$p" = -rpath ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "$p" = -dynamic-linker ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "$p" = -syslibroot ] && [ $p2 == // ]; then
# When gcc is built on darwin --with-build-sysroot=/
# produces '-syslibroot //' linker flag. It's a no-op,
# which does not introduce impurities.
n+=1; skip "$p2"
elif [ "${p:0:10}" = /LIBPATH:/ ] && badPath "${p:9}"; then
reject "${p:9}"
# We need to not match LINK.EXE-style flags like
# /NOLOGO or /LIBPATH:/nix/store/foo
elif [[ $p =~ ^/[^:]*/ ]] && badPath "$p"; then
reject "$p"
elif [ "${p:0:9}" = --sysroot ]; then
# Our ld is not built with sysroot support (Can we fix that?)
:
else
rest+=("$p")
fi
n+=1
done
# Old bash empty array hack
params=(${rest+"${rest[@]}"})
fi
source @out@/nix-support/add-hardening.sh
extraAfter=()
extraBefore=(${hardeningLDFlags[@]+"${hardeningLDFlags[@]}"})
if [ -z "${NIX_LINK_TYPE_@suffixSalt@:-}" ]; then
extraAfter+=($(filterRpathFlags "$linkType" $NIX_LDFLAGS_@suffixSalt@))
extraBefore+=($(filterRpathFlags "$linkType" $NIX_LDFLAGS_BEFORE_@suffixSalt@))
# By adding dynamic linker to extraBefore we allow the users set their
# own dynamic linker as NIX_LD_FLAGS will override earlier set flags
if [[ "$linkType" == dynamic && -n "$NIX_DYNAMIC_LINKER_@suffixSalt@" ]]; then
extraBefore+=("-dynamic-linker" "$NIX_DYNAMIC_LINKER_@suffixSalt@")
fi
fi
extraAfter+=($(filterRpathFlags "$linkType" $NIX_LDFLAGS_AFTER_@suffixSalt@))
# These flags *must not* be pulled up to -Wl, flags, so they can't go in
# add-flags.sh. They must always be set, so must not be disabled by
# NIX_LDFLAGS_SET.
if [ -e @out@/nix-support/add-local-ldflags-before.sh ]; then
source @out@/nix-support/add-local-ldflags-before.sh
fi
# Three tasks:
#
# 1. Find all -L... switches for rpath
#
# 2. Find relocatable flag for build id.
#
# 3. Choose 32-bit dynamic linker if needed
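# As a sketch with hypothetical flags, a command line like
#   ld -o prog -L/nix/store/xyz-zlib/lib -lz main.o
# records /nix/store/xyz-zlib/lib in libDirs and "libz.so" in libs, leaves
# relocatable at 0, and sets link32 only if "-m elf_i386" was seen.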
declare -a libDirs
declare -A libs
declare -i relocatable=0 link32=0
linkerOutput="a.out"
if
[ "$NIX_DONT_SET_RPATH_@suffixSalt@" != 1 ] \
|| [ "$NIX_SET_BUILD_ID_@suffixSalt@" = 1 ] \
|| [ -e @out@/nix-support/dynamic-linker-m32 ]
then
prev=
# Old bash thinks empty arrays are undefined, ugh.
for p in \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
do
case "$prev" in
-L)
libDirs+=("$p")
;;
-l)
libs["lib${p}.so"]=1
;;
-m)
# Presumably only the last `-m` flag has any effect.
case "$p" in
elf_i386) link32=1;;
*) link32=0;;
esac
;;
-dynamic-linker | -plugin)
# Ignore this argument, or it will match *.so and be added to rpath.
;;
*)
case "$p" in
-L/*)
libDirs+=("${p:2}")
;;
-l?*)
libs["lib${p:2}.so"]=1
;;
"${NIX_STORE:-}"/*.so | "${NIX_STORE:-}"/*.so.*)
# This is a direct reference to a shared library.
libDirs+=("${p%/*}")
libs["${p##*/}"]=1
;;
-r | --relocatable | -i)
relocatable=1
esac
;;
esac
prev="$p"
done
fi
# Determine linkerOutput
prev=
for p in \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
do
case "$prev" in
-o)
# Informational for post-link-hook
linkerOutput="$p"
;;
*)
;;
esac
prev="$p"
done
if [[ "$link32" == "1" && "$linkType" == dynamic && -e "@out@/nix-support/dynamic-linker-m32" ]]; then
# We have an alternate 32-bit linker and we're producing a 32-bit ELF, let's
# use it.
extraAfter+=(
'-dynamic-linker'
"$(< @out@/nix-support/dynamic-linker-m32)"
)
fi
# Add all used dynamic libraries to the rpath.
if [[ "$NIX_DONT_SET_RPATH_@suffixSalt@" != 1 && "$linkType" != static-pie ]]; then
# For each directory in the library search path (-L...),
# see if it contains a dynamic library used by a -l... flag. If
# so, add the directory to the rpath.
# It's important to add the rpath in the order of -L..., so
# the link time chosen objects will be those of runtime linking.
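    # Sketch with hypothetical paths: given "-L/build/lib -L/nix/store/xyz-zlib/lib -lz",
    # only /nix/store/xyz-zlib/lib can enter the rpath (non-store paths are
    # skipped below), and candidate directories are walked strictly in -L order.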
declare -A rpaths
for dir in ${libDirs+"${libDirs[@]}"}; do
if [[ "$dir" =~ [/.][/.] ]] && dir2=$(readlink -f "$dir"); then
dir="$dir2"
fi
if [ -n "${rpaths[$dir]:-}" ] || [[ "$dir" != "${NIX_STORE:-}"/* ]]; then
# If the path is not in the store, don't add it to the rpath.
# This typically happens for libraries in /tmp that are later
# copied to $out/lib. If not, we're screwed.
continue
fi
for path in "$dir"/*; do
file="${path##*/}"
if [ "${libs[$file]:-}" ]; then
# This library may have been provided by a previous directory,
# but if that library file is inside an output of the current
# derivation, it can be deleted after this compilation and
# should be found in a later directory, so we add all
# directories that contain any of the libraries to rpath.
rpaths["$dir"]=1
extraAfter+=(-rpath "$dir")
break
fi
done
done
fi
# This is outside the DONT_SET_RPATH branch because it's more targeted and we
# usually want it (on Darwin) even if DONT_SET_RPATH is set.
if [ -n "${NIX_COREFOUNDATION_RPATH:-}" ]; then
extraAfter+=(-rpath $NIX_COREFOUNDATION_RPATH)
fi
# Only add --build-id if this is a final link. FIXME: should build gcc
# with --enable-linker-build-id instead?
#
# Note: `lld` interprets `--build-id` to mean `--build-id=fast`; GNU ld defaults
# to SHA1.
if [ "$NIX_SET_BUILD_ID_@suffixSalt@" = 1 ] && ! (( "$relocatable" )); then
extraAfter+=(--build-id="${NIX_BUILD_ID_STYLE:-sha1}")
fi
# Optionally print debug info.
if (( "${NIX_DEBUG:-0}" >= 1 )); then
# Old bash workaround, see above.
echo "extra flags before to @prog@:" >&2
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
echo "original flags to @prog@:" >&2
printf " %q\n" ${params+"${params[@]}"} >&2
echo "extra flags after to @prog@:" >&2
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
fi
PATH="$path_backup"
# Old bash workaround, see above.
if (( "${NIX_LD_USE_RESPONSE_FILE:-@use_response_file_by_default@}" >= 1 )); then
@prog@ @<(printf "%q\n" \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"})
else
@prog@ \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
fi
if [ -e "@out@/nix-support/post-link-hook" ]; then
source @out@/nix-support/post-link-hook
fi

View file

@ -0,0 +1,246 @@
#! @shell@
set -eu -o pipefail
# For cmd | while read; do ...; done
shopt -s lastpipe
path_backup="$PATH"
if [ -n "@coreutils_bin@" ]; then
PATH="@coreutils_bin@/bin"
fi
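# Presumably this implements the macOS "reexport hack": when a link line
# references more dynamic libraries than the threshold below, the inputs are
# split across two generated delegate dylibs that reexport them, and the real
# output links against those two delegates instead.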
declare -ri recurThreshold=200
declare -i overflowCount=0
declare -ar origArgs=("$@")
# Throw away what we won't need
declare -a parentArgs=()
while (( $# )); do
case "$1" in
-l)
echo "cctools LD does not support '-l foo'" >&2
exit 1
;;
-lazy_library | -reexport_library | -upward_library | -weak_library)
overflowCount+=1
shift 2
;;
-l* | *.so.* | *.dylib | -lazy-l* | -reexport-l* | -upward-l* | -weak-l*)
overflowCount+=1
shift 1
;;
*.a | *.o)
shift 1
;;
-L | -F)
            # Evidently ld doesn't like using the child's RPATH, so it still
# needs these.
parentArgs+=("$1" "$2")
shift 2
;;
-L?* | -F?*)
parentArgs+=("$1")
shift 1
;;
-o)
outputName="$2"
parentArgs+=("$1" "$2")
shift 2
;;
-install_name | -dylib_install_name | -dynamic-linker | -plugin)
parentArgs+=("$1" "$2")
shift 2
;;
-rpath)
# Only an rpath to the child is needed, which we will add
shift 2
;;
*)
if [[ -f "$1" ]]; then
                # Probably a non-standard object file like Haskell's
                # `.dyn_o`. Skip it like other inputs.
:
else
parentArgs+=("$1")
fi
shift 1
;;
esac
done
if (( "$overflowCount" <= "$recurThreshold" )); then
if [ -n "${NIX_DEBUG:-}" ]; then
echo "ld-wrapper: Only ${overflowCount} inputs counted while ${recurThreshold} is the ceiling, linking normally. " >&2
fi
PATH="$path_backup"
exec @prog@ "${origArgs[@]}"
fi
if [ -n "${NIX_DEBUG:-}" ]; then
echo "ld-wrapper: ${overflowCount} inputs counted when ${recurThreshold} is the ceiling, inspecting further. " >&2
fi
# Collect the normalized linker input
declare -a norm=()
# Arguments are null-separated
@prog@ --dump-normalized-lib-args "${origArgs[@]}" |
while IFS= read -r -d '' input; do
norm+=("$input")
done
declare -i leafCount=0
declare lastLeaf=''
declare -a childrenInputs=() trailingInputs=()
while (( "${#norm[@]}" )); do
case "${norm[0]}" in
-lazy_library | -upward_library)
# TODO(@Ericson2314): Don't do that, but intersperse children
# between such args.
echo "ld-wrapper: Warning: Potentially changing link order" >&2
trailingInputs+=("${norm[0]}" "${norm[1]}")
norm=("${norm[@]:2}")
;;
-reexport_library | -weak_library)
childrenInputs+=("${norm[0]}" "${norm[1]}")
if [[ "${norm[1]}" != "$lastLeaf" ]]; then
leafCount+=1
lastLeaf="${norm[1]}"
fi
norm=("${norm[@]:2}")
;;
*.so | *.dylib)
childrenInputs+=(-reexport_library "${norm[0]}")
if [[ "${norm[0]}" != "$lastLeaf" ]]; then
leafCount+=1
lastLeaf="${norm[0]}"
fi
norm=("${norm[@]:1}")
;;
*.o | *.a)
# Don't delegate object files or static libs
parentArgs+=("${norm[0]}")
norm=("${norm[@]:1}")
;;
*)
if [[ -f "${norm[0]}" ]]; then
            # Probably a non-standard object file. We'll let it by.
parentArgs+=("${norm[0]}")
norm=("${norm[@]:1}")
else
echo "ld-wrapper: Internal Error: Invalid normalized argument" >&2
exit 255
fi
;;
esac
done
if (( "$leafCount" <= "$recurThreshold" )); then
if [ -n "${NIX_DEBUG:-}" ]; then
echo "ld-wrapper: Only ${leafCount} *dynamic* inputs counted while ${recurThreshold} is the ceiling, linking normally. " >&2
fi
PATH="$path_backup"
exec @prog@ "${origArgs[@]}"
fi
if [ -n "${NIX_DEBUG:-}" ]; then
echo "ld-wrapper: ${leafCount} *dynamic* inputs counted when ${recurThreshold} is the ceiling, delegating to children. " >&2
fi
declare -r outputNameLibless=$( \
if [[ -z "${outputName:+isUndefined}" ]]; then
echo unnamed
return 0;
fi
baseName=$(basename ${outputName})
if [[ "$baseName" = lib* ]]; then
baseName="${baseName:3}"
fi
echo "$baseName")
declare -ra children=(
"$outputNameLibless-reexport-delegate-0"
"$outputNameLibless-reexport-delegate-1"
)
mkdir -p "$out/lib"
symbolBloatObject=$outputNameLibless-symbol-hack.o
if [[ ! -f $symbolBloatObject ]]; then
# `-Q` means use GNU Assembler rather than Clang, avoiding an awkward
# dependency cycle.
printf '.private_extern _______child_hack_foo\nchild_hack_foo:\n' |
PATH="$PATH:@out@/bin" @targetPrefix@as -Q -- -o $symbolBloatObject
fi
# Split inputs between children
declare -a child0Inputs=() child1Inputs=("${childrenInputs[@]}")
let "countFirstChild = $leafCount / 2" || true
lastLeaf=''
while (( "$countFirstChild" )); do
case "${child1Inputs[0]}" in
-reexport_library | -weak_library)
child0Inputs+=("${child1Inputs[0]}" "${child1Inputs[1]}")
if [[ "${child1Inputs[1]}" != "$lastLeaf" ]]; then
let countFirstChild-=1 || true
lastLeaf="${child1Inputs[1]}"
fi
child1Inputs=("${child1Inputs[@]:2}")
;;
    *.so | *.dylib)
      child0Inputs+=(-reexport_library "${child1Inputs[0]}")
      if [[ "${child1Inputs[0]}" != "$lastLeaf" ]]; then
        let countFirstChild-=1 || true
        lastLeaf="${child1Inputs[0]}"
      fi
      child1Inputs=("${child1Inputs[@]:1}")
      ;;
;;
*)
echo "ld-wrapper: Internal Error: Invalid delegated input" >&2
      exit 1
;;
esac
done
# First half of libs
@out@/bin/@targetPrefix@ld \
-macosx_version_min $MACOSX_DEPLOYMENT_TARGET -arch x86_64 -dylib \
-o "$out/lib/lib${children[0]}.dylib" \
-install_name "$out/lib/lib${children[0]}.dylib" \
"$symbolBloatObject" "${child0Inputs[@]}" "${trailingInputs[@]}"
# Second half of libs
@out@/bin/@targetPrefix@ld \
-macosx_version_min $MACOSX_DEPLOYMENT_TARGET -arch x86_64 -dylib \
-o "$out/lib/lib${children[1]}.dylib" \
-install_name "$out/lib/lib${children[1]}.dylib" \
"$symbolBloatObject" "${child1Inputs[@]}" "${trailingInputs[@]}"
parentArgs+=("-L$out/lib" -rpath "$out/lib")
if [[ "${outputName:-}" != *reexport-delegate* ]]; then
parentArgs+=("-l${children[0]}" "-l${children[1]}")
else
parentArgs+=("-reexport-l${children[0]}" "-reexport-l${children[1]}")
fi
parentArgs+=("${trailingInputs[@]}")
if [ -n "${NIX_DEBUG:-}" ]; then
echo "flags using delegated children to @prog@:" >&2
printf " %q\n" "${parentArgs[@]}" >&2
fi
PATH="$path_backup"
exec @prog@ "${parentArgs[@]}"

View file

@ -0,0 +1,72 @@
# Binutils Wrapper hygiene
#
# See comments in cc-wrapper's setup hook. This works exactly the same way.
# Skip setup hook if we're neither a build-time dep, nor, temporarily, doing a
# native compile.
#
# TODO(@Ericson2314): No native exception
[[ -z ${strictDeps-} ]] || (( "$hostOffset" < 0 )) || return 0
bintoolsWrapper_addLDVars () {
# See ../setup-hooks/role.bash
local role_post
getHostRoleEnvHook
if [[ -d "$1/lib64" && ! -L "$1/lib64" ]]; then
export NIX_LDFLAGS${role_post}+=" -L$1/lib64"
fi
if [[ -d "$1/lib" ]]; then
# Don't add the /lib directory if it actually doesn't contain any libraries. For instance,
# Python and Haskell packages often only have directories like $out/lib/ghc-8.4.3/ or
# $out/lib/python3.6/, so having them in LDFLAGS just makes the linker search unnecessary
# directories and bloats the size of the environment variable space.
local -a glob=( $1/lib/lib* )
if [ "${#glob[*]}" -gt 0 ]; then
export NIX_LDFLAGS${role_post}+=" -L$1/lib"
fi
fi
}
# See ../setup-hooks/role.bash
getTargetRole
getTargetRoleWrapper
addEnvHooks "$targetOffset" bintoolsWrapper_addLDVars
# shellcheck disable=SC2157
if [ -n "@bintools_bin@" ]; then
addToSearchPath _PATH @bintools_bin@/bin
fi
# shellcheck disable=SC2157
if [ -n "@libc_bin@" ]; then
addToSearchPath _PATH @libc_bin@/bin
fi
# shellcheck disable=SC2157
if [ -n "@coreutils_bin@" ]; then
addToSearchPath _PATH @coreutils_bin@/bin
fi
# Export tool environment variables so various build systems use the right ones.
export NIX_BINTOOLS${role_post}=@out@
for cmd in \
ar as ld nm objcopy objdump readelf ranlib strip strings size windres
do
if
PATH=$_PATH type -p "@targetPrefix@${cmd}" > /dev/null
then
export "${cmd^^}${role_post}=@targetPrefix@${cmd}";
fi
done
# If unset, assume the default hardening flags.
: ${NIX_HARDENING_ENABLE="@default_hardening_flags_str@"}
export NIX_HARDENING_ENABLE
# No local scope in sourced file
unset -v role_post cmd

View file

@ -0,0 +1,294 @@
{ stdenv
, cacert
, lib
, writeCBin
}:
args@{
name ? "${args.pname}-${args.version}"
, bazel
, bazelFlags ? []
, bazelBuildFlags ? []
, bazelTestFlags ? []
, bazelRunFlags ? []
, runTargetFlags ? []
, bazelFetchFlags ? []
, bazelTargets ? []
, bazelTestTargets ? []
, bazelRunTarget ? null
, buildAttrs
, fetchAttrs
# Newer versions of Bazel are moving away from built-in rules_cc and instead
# allow fetching it as an external dependency in a WORKSPACE file[1]. If
# removed in the fixed-output fetch phase, building will fail to download it.
# This can be seen e.g. in #73097
#
# This option allows configuring the removal of rules_cc in cases where a
# project depends on it via an external dependency.
#
# [1]: https://github.com/bazelbuild/rules_cc
, removeRulesCC ? true
, removeLocalConfigCc ? true
, removeLocal ? true
# Use build --nobuild instead of fetch. This allows fetching the dependencies
# required for the build as configured, rather than fetching all the dependencies
# which may not work in some situations (e.g. Java code which ends up relying on
# Debian-specific /usr/share/java paths, but doesn't in the configured build).
, fetchConfigured ? true
# Don't add Bazel --copt and --linkopt from NIX_CFLAGS_COMPILE /
# NIX_LDFLAGS. This is necessary when using a custom toolchain which
# Bazel wants all headers / libraries to come from, like when using
# CROSSTOOL. Weirdly, we can still get the flags through the wrapped
# compiler.
, dontAddBazelOpts ? false
, ...
}:
let
fArgs = removeAttrs args [ "buildAttrs" "fetchAttrs" "removeRulesCC" ] // {
inherit
name
bazelFlags
bazelBuildFlags
bazelTestFlags
bazelRunFlags
runTargetFlags
bazelFetchFlags
bazelTargets
bazelTestTargets
bazelRunTarget
dontAddBazelOpts
;
};
fBuildAttrs = fArgs // buildAttrs;
fFetchAttrs = fArgs // removeAttrs fetchAttrs [ "sha256" ];
bazelCmd = { cmd, additionalFlags, targets, targetRunFlags ? [ ] }:
lib.optionalString (targets != [ ]) ''
# See footnote called [USER and BAZEL_USE_CPP_ONLY_TOOLCHAIN variables]
BAZEL_USE_CPP_ONLY_TOOLCHAIN=1 \
USER=homeless-shelter \
bazel \
--batch \
--output_base="$bazelOut" \
--output_user_root="$bazelUserRoot" \
${cmd} \
--curses=no \
"''${copts[@]}" \
"''${host_copts[@]}" \
"''${linkopts[@]}" \
"''${host_linkopts[@]}" \
$bazelFlags \
${lib.strings.concatStringsSep " " additionalFlags} \
${lib.strings.concatStringsSep " " targets} \
${lib.optionalString (targetRunFlags != []) " -- " + lib.strings.concatStringsSep " " targetRunFlags}
'';
# we need this to chmod dangling symlinks on darwin, gnu coreutils refuses to do so:
# chmod: cannot operate on dangling symlink '$symlink'
chmodder = writeCBin "chmodder" ''
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
int main(int argc, char** argv) {
mode_t mode = S_IRWXU | S_IRWXG | S_IRWXO;
    if (argc != 2) {
      fprintf(stderr, "usage: chmodder file\n");
      exit(EXIT_FAILURE);
    }
    /* Report the file we actually tried to chmod (argv[1]), not argv[0]. */
    if (lchmod(argv[1], mode) != 0) {
      fprintf(stderr, "failed to lchmod '%s': %s\n", argv[1], strerror(errno));
      exit(EXIT_FAILURE);
    }
}
'';
in
stdenv.mkDerivation (fBuildAttrs // {
deps = stdenv.mkDerivation (fFetchAttrs // {
name = "${name}-deps.tar.gz";
impureEnvVars = lib.fetchers.proxyImpureEnvVars ++ fFetchAttrs.impureEnvVars or [];
nativeBuildInputs = fFetchAttrs.nativeBuildInputs or [] ++ [ bazel ];
preHook = fFetchAttrs.preHook or "" + ''
export bazelOut="$(echo ''${NIX_BUILD_TOP}/output | sed -e 's,//,/,g')"
export bazelUserRoot="$(echo ''${NIX_BUILD_TOP}/tmp | sed -e 's,//,/,g')"
export HOME="$NIX_BUILD_TOP"
export USER="nix"
# This is needed for git_repository with https remotes
export GIT_SSL_CAINFO="${cacert}/etc/ssl/certs/ca-bundle.crt"
# This is needed for Bazel fetchers that are themselves programs (e.g.
# rules_go using the go toolchain)
export SSL_CERT_FILE="${cacert}/etc/ssl/certs/ca-bundle.crt"
'';
buildPhase = fFetchAttrs.buildPhase or ''
runHook preBuild
${
bazelCmd {
cmd = if fetchConfigured then "build --nobuild" else "fetch";
additionalFlags = [
# We disable multithreading for the fetching phase since it can lead to timeouts with many dependencies/threads:
# https://github.com/bazelbuild/bazel/issues/6502
"--loading_phase_threads=1"
"$bazelFetchFlags"
] ++ (if fetchConfigured then ["--jobs" "$NIX_BUILD_CORES"] else []);
targets = fFetchAttrs.bazelTargets ++ fFetchAttrs.bazelTestTargets;
}
}
runHook postBuild
'';
installPhase = fFetchAttrs.installPhase or (''
runHook preInstall
# Remove all built in external workspaces, Bazel will recreate them when building
rm -rf $bazelOut/external/{bazel_tools,\@bazel_tools.marker}
${lib.optionalString removeRulesCC "rm -rf $bazelOut/external/{rules_cc,\\@rules_cc.marker}"}
rm -rf $bazelOut/external/{embedded_jdk,\@embedded_jdk.marker}
${lib.optionalString removeLocalConfigCc "rm -rf $bazelOut/external/{local_config_cc,\\@local_config_cc.marker}"}
${lib.optionalString removeLocal "rm -rf $bazelOut/external/{local_*,\\@local_*.marker}"}
# Clear markers
find $bazelOut/external -name '@*\.marker' -exec sh -c 'echo > {}' \;
# Remove all vcs files
rm -rf $(find $bazelOut/external -type d -name .git)
rm -rf $(find $bazelOut/external -type d -name .svn)
rm -rf $(find $bazelOut/external -type d -name .hg)
# Removing top-level symlinks along with their markers.
# This is needed because they sometimes point to temporary paths (?).
# For example, in Tensorflow-gpu build:
# platforms -> NIX_BUILD_TOP/tmp/install/35282f5123611afa742331368e9ae529/_embedded_binaries/platforms
find $bazelOut/external -maxdepth 1 -type l | while read symlink; do
name="$(basename "$symlink")"
rm "$symlink"
test -f "$bazelOut/external/@$name.marker" && rm "$bazelOut/external/@$name.marker" || true
done
# Patching symlinks to remove build directory reference
find $bazelOut/external -type l | while read symlink; do
new_target="$(readlink "$symlink" | sed "s,$NIX_BUILD_TOP,NIX_BUILD_TOP,")"
rm "$symlink"
ln -sf "$new_target" "$symlink"
'' + lib.optionalString stdenv.isDarwin ''
# on linux symlink permissions cannot be modified, so we modify those on darwin to match the linux ones
${chmodder}/bin/chmodder "$symlink"
'' + ''
done
echo '${bazel.name}' > $bazelOut/external/.nix-bazel-version
(cd $bazelOut/ && tar czf $out --sort=name --mtime='@1' --owner=0 --group=0 --numeric-owner external/)
runHook postInstall
'');
dontFixup = true;
allowedRequisites = [];
outputHashAlgo = "sha256";
outputHash = fetchAttrs.sha256;
});
nativeBuildInputs = fBuildAttrs.nativeBuildInputs or [] ++ [ (bazel.override { enableNixHacks = true; }) ];
preHook = fBuildAttrs.preHook or "" + ''
export bazelOut="$NIX_BUILD_TOP/output"
export bazelUserRoot="$NIX_BUILD_TOP/tmp"
export HOME="$NIX_BUILD_TOP"
'';
preConfigure = ''
mkdir -p "$bazelOut"
(cd $bazelOut && tar xfz $deps)
test "${bazel.name}" = "$(<$bazelOut/external/.nix-bazel-version)" || {
echo "fixed output derivation was built for a different bazel version" >&2
echo " got: $(<$bazelOut/external/.nix-bazel-version)" >&2
echo "expected: ${bazel.name}" >&2
exit 1
}
chmod -R +w $bazelOut
find $bazelOut -type l | while read symlink; do
if [[ $(readlink "$symlink") == *NIX_BUILD_TOP* ]]; then
ln -sf $(readlink "$symlink" | sed "s,NIX_BUILD_TOP,$NIX_BUILD_TOP,") "$symlink"
fi
done
'' + fBuildAttrs.preConfigure or "";
buildPhase = fBuildAttrs.buildPhase or ''
runHook preBuild
# Bazel sandboxes the execution of the tools it invokes, so even though we are
# calling the correct nix wrappers, the values of the environment variables
# the wrappers are expecting will not be set. So instead of relying on the
# wrappers picking them up, pass them in explicitly via `--copt`, `--linkopt`
# and related flags.
copts=()
host_copts=()
linkopts=()
host_linkopts=()
if [ -z "''${dontAddBazelOpts:-}" ]; then
for flag in $NIX_CFLAGS_COMPILE; do
copts+=( "--copt=$flag" )
host_copts+=( "--host_copt=$flag" )
done
for flag in $NIX_CXXSTDLIB_COMPILE; do
copts+=( "--copt=$flag" )
host_copts+=( "--host_copt=$flag" )
done
for flag in $NIX_LDFLAGS; do
linkopts+=( "--linkopt=-Wl,$flag" )
host_linkopts+=( "--host_linkopt=-Wl,$flag" )
done
fi
${
bazelCmd {
cmd = "test";
additionalFlags =
["--test_output=errors"] ++ fBuildAttrs.bazelTestFlags ++ ["--jobs" "$NIX_BUILD_CORES"];
targets = fBuildAttrs.bazelTestTargets;
}
}
${
bazelCmd {
cmd = "build";
additionalFlags = fBuildAttrs.bazelBuildFlags ++ ["--jobs" "$NIX_BUILD_CORES"];
targets = fBuildAttrs.bazelTargets;
}
}
${
bazelCmd {
cmd = "run";
additionalFlags = fBuildAttrs.bazelRunFlags ++ [ "--jobs" "$NIX_BUILD_CORES" ];
# Bazel run only accepts a single target, but `bazelCmd` expects `targets` to be a list.
targets = lib.optionals (fBuildAttrs.bazelRunTarget != null) [ fBuildAttrs.bazelRunTarget ];
targetRunFlags = fBuildAttrs.runTargetFlags;
}
}
runHook postBuild
'';
})
# [USER and BAZEL_USE_CPP_ONLY_TOOLCHAIN variables]:
# Bazel computes the default value of output_user_root before parsing the
# flag. The computation of the default value involves getting the $USER
# from the environment. Code here :
# https://github.com/bazelbuild/bazel/blob/9323c57607d37f9c949b60e293b573584906da46/src/main/cpp/startup_options.cc#L123-L124
#
# On macOS Bazel will use the system installed Xcode or CLT toolchain instead of the one in the PATH unless we pass BAZEL_USE_CPP_ONLY_TOOLCHAIN.
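As an illustration only (package name, target, and hash are hypothetical), a
call site of this builder looks roughly like:

  buildBazelPackage {
    pname = "example-tool";
    version = "1.0";
    src = ./.;
    inherit bazel;
    bazelTargets = [ "//:example-tool" ];
    fetchAttrs.sha256 = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
    buildAttrs.installPhase = ''
      install -Dm755 bazel-bin/example-tool $out/bin/example-tool
    '';
  }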

View file

@ -0,0 +1,266 @@
{ lib
, stdenv
, runCommandLocal
, buildEnv
, writeText
, writeShellScriptBin
, pkgs
, pkgsi686Linux
}:
{ profile ? ""
, targetPkgs ? pkgs: []
, multiPkgs ? pkgs: []
, multiArch ? false # Whether to include 32bit packages
, extraBuildCommands ? ""
, extraBuildCommandsMulti ? ""
, extraOutputsToInstall ? []
, ... # for name, or pname+version
} @ args:
# HOWTO:
# All packages (most likely programs) returned from targetPkgs will only be
# installed once--matching the host's architecture (64bit on x86_64 and 32bit on
# x86).
#
# Packages (most likely libraries) returned from multiPkgs are installed
# once on x86 systems and twice on x86_64 systems.
# On x86 they are merged with packages from targetPkgs.
# On x86_64 they are added to targetPkgs and in addition their 32bit
# versions are also installed. The final directory structure looks as
# follows:
# /lib32 will include 32bit libraries from multiPkgs
# /lib64 will include 64bit libraries from multiPkgs and targetPkgs
# /lib will link to /lib32
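# A minimal sketch of a call site (package choices are hypothetical):
#   buildFHSEnv {
#     name = "example-fhs";
#     targetPkgs = pkgs: [ pkgs.coreutils ];
#     multiPkgs = pkgs: [ pkgs.zlib ];
#     multiArch = true;
#   }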
let
inherit (stdenv) is64bit;
name = if (args ? pname && args ? version)
then "${args.pname}-${args.version}"
else args.name;
# "use of glibc_multi is only supported on x86_64-linux"
isMultiBuild = multiArch && stdenv.system == "x86_64-linux";
isTargetBuild = !isMultiBuild;
# list of packages (usually programs) which match the host's architecture
# (which includes stuff from multiPkgs)
targetPaths = targetPkgs pkgs ++ (if multiPkgs == null then [] else multiPkgs pkgs);
# list of packages which are for x86 (only multiPkgs, only for x86_64 hosts)
multiPaths = multiPkgs pkgsi686Linux;
# base packages of the chroot
# these match the host's architecture, glibc_multi is used for multilib
  # builds. glibcLocales must be before glibc or glibc_multi as otherwise
# the wrong LOCALE_ARCHIVE will be used where only C.UTF-8 is available.
baseTargetPaths = with pkgs; [
glibcLocales
(if isMultiBuild then glibc_multi else glibc)
(toString gcc.cc.lib)
bashInteractiveFHS
coreutils
less
shadow
su
gawk
diffutils
findutils
gnused
gnugrep
gnutar
gzip
bzip2
xz
];
baseMultiPaths = with pkgsi686Linux; [
(toString gcc.cc.lib)
];
ldconfig = writeShellScriptBin "ldconfig" ''
# due to a glibc bug, 64-bit ldconfig complains about patchelf'd 32-bit libraries, so we use 32-bit ldconfig when we have them
exec ${if isMultiBuild then pkgsi686Linux.glibc.bin else pkgs.glibc.bin}/bin/ldconfig -f /etc/ld.so.conf -C /etc/ld.so.cache "$@"
'';
etcProfile = writeText "profile" ''
export PS1='${name}-chrootenv:\u@\h:\w\$ '
export LOCALE_ARCHIVE='/usr/lib/locale/locale-archive'
export LD_LIBRARY_PATH="/run/opengl-driver/lib:/run/opengl-driver-32/lib''${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH"
export PATH="/run/wrappers/bin:/usr/bin:/usr/sbin:$PATH"
export TZDIR='/etc/zoneinfo'
# XDG_DATA_DIRS is used by pressure-vessel (steam proton) and vulkan loaders to find the corresponding icd
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/run/opengl-driver/share:/run/opengl-driver-32/share
# Following XDG spec [1], XDG_DATA_DIRS should default to "/usr/local/share:/usr/share".
# In nix, it is commonly set without containing these values, so we add them as fallback.
#
# [1] <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>
case ":$XDG_DATA_DIRS:" in
*:/usr/local/share:*) ;;
*) export XDG_DATA_DIRS="$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/usr/local/share" ;;
esac
case ":$XDG_DATA_DIRS:" in
*:/usr/share:*) ;;
*) export XDG_DATA_DIRS="$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/usr/share" ;;
esac
# Force compilers and other tools to look in default search paths
unset NIX_ENFORCE_PURITY
export NIX_BINTOOLS_WRAPPER_TARGET_HOST_${stdenv.cc.suffixSalt}=1
export NIX_CC_WRAPPER_TARGET_HOST_${stdenv.cc.suffixSalt}=1
export NIX_CFLAGS_COMPILE='-idirafter /usr/include'
export NIX_CFLAGS_LINK='-L/usr/lib -L/usr/lib32'
export NIX_LDFLAGS='-L/usr/lib -L/usr/lib32'
export PKG_CONFIG_PATH=/usr/lib/pkgconfig
export ACLOCAL_PATH=/usr/share/aclocal
${profile}
'';
# Compose /etc for the chroot environment
etcPkg = runCommandLocal "${name}-chrootenv-etc" { } ''
mkdir -p $out/etc
pushd $out/etc
# environment variables
ln -s ${etcProfile} profile
# symlink /etc/mtab -> /proc/mounts (compat for old userspace progs)
ln -s /proc/mounts mtab
'';
# Composes a /usr-like directory structure
staticUsrProfileTarget = buildEnv {
name = "${name}-usr-target";
# ldconfig wrapper must come first so it overrides the original ldconfig
paths = [ etcPkg ldconfig ] ++ baseTargetPaths ++ targetPaths;
extraOutputsToInstall = [ "out" "lib" "bin" ] ++ extraOutputsToInstall;
ignoreCollisions = true;
postBuild = ''
if [[ -d $out/share/gsettings-schemas/ ]]; then
      # Recreate the standard schemas directory if it's a symlink, to make it writable
if [[ -L $out/share/glib-2.0 ]]; then
target=$(readlink $out/share/glib-2.0)
rm $out/share/glib-2.0
mkdir $out/share/glib-2.0
ln -fs $target/* $out/share/glib-2.0
fi
if [[ -L $out/share/glib-2.0/schemas ]]; then
target=$(readlink $out/share/glib-2.0/schemas)
rm $out/share/glib-2.0/schemas
mkdir $out/share/glib-2.0/schemas
ln -fs $target/* $out/share/glib-2.0/schemas
fi
mkdir -p $out/share/glib-2.0/schemas
for d in $out/share/gsettings-schemas/*; do
# Force symlink, in case there are duplicates
ln -fs $d/glib-2.0/schemas/*.xml $out/share/glib-2.0/schemas
ln -fs $d/glib-2.0/schemas/*.gschema.override $out/share/glib-2.0/schemas
done
# and compile them
${pkgs.glib.dev}/bin/glib-compile-schemas $out/share/glib-2.0/schemas
fi
'';
};
staticUsrProfileMulti = buildEnv {
name = "${name}-usr-multi";
paths = baseMultiPaths ++ multiPaths;
extraOutputsToInstall = [ "out" "lib" ] ++ extraOutputsToInstall;
ignoreCollisions = true;
};
# setup library paths only for the targeted architecture
setupLibDirsTarget = ''
# link content of targetPaths
cp -rsHf ${staticUsrProfileTarget}/lib lib
ln -s lib lib${if is64bit then "64" else "32"}
'';
# setup /lib, /lib32 and /lib64
setupLibDirsMulti = ''
mkdir -m0755 lib32
mkdir -m0755 lib64
ln -s lib64 lib
# copy glibc stuff
cp -rsHf ${staticUsrProfileTarget}/lib/32/* lib32/
chmod u+w -R lib32/
# copy content of multiPaths (32bit libs)
if [ -d ${staticUsrProfileMulti}/lib ]; then
cp -rsHf ${staticUsrProfileMulti}/lib/* lib32/
chmod u+w -R lib32/
fi
# copy content of targetPaths (64bit libs)
cp -rsHf ${staticUsrProfileTarget}/lib/* lib64/
chmod u+w -R lib64/
# symlink 32-bit ld-linux.so
ln -Ls ${staticUsrProfileTarget}/lib/32/ld-linux.so.2 lib/
'';
setupLibDirs = if isTargetBuild
then setupLibDirsTarget
else setupLibDirsMulti;
# the target profile is the actual profile that will be used for the chroot
setupTargetProfile = ''
mkdir -m0755 usr
pushd usr
${setupLibDirs}
'' + lib.optionalString isMultiBuild ''
if [ -d "${staticUsrProfileMulti}/share" ]; then
cp -rLf ${staticUsrProfileMulti}/share share
fi
'' + ''
if [ -d "${staticUsrProfileTarget}/share" ]; then
if [ -d share ]; then
chmod -R 755 share
cp -rLTf ${staticUsrProfileTarget}/share share
else
cp -rsHf ${staticUsrProfileTarget}/share share
fi
fi
for i in bin sbin include; do
if [ -d "${staticUsrProfileTarget}/$i" ]; then
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
fi
done
cd ..
for i in var etc opt; do
if [ -d "${staticUsrProfileTarget}/$i" ]; then
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
fi
done
for i in usr/{bin,sbin,lib,lib32,lib64}; do
if [ -d "$i" ]; then
ln -s "$i"
fi
done
popd
'';
in runCommandLocal "${name}-fhs" {
passthru = {
inherit args baseTargetPaths targetPaths baseMultiPaths ldconfig isMultiBuild;
};
} ''
mkdir -p $out
pushd $out
${setupTargetProfile}
${extraBuildCommands}
${lib.optionalString isMultiBuild extraBuildCommandsMulti}
''

View file

@ -0,0 +1,289 @@
{ lib
, stdenv
, callPackage
, runCommandLocal
, writeShellScript
, glibc
, pkgsi686Linux
, coreutils
, bubblewrap
}:
{ runScript ? "bash"
, extraInstallCommands ? ""
, meta ? {}
, passthru ? {}
, extraPreBwrapCmds ? ""
, extraBwrapArgs ? []
, unshareUser ? false
, unshareIpc ? false
, unsharePid ? false
, unshareNet ? false
, unshareUts ? false
, unshareCgroup ? false
, privateTmp ? false
, dieWithParent ? true
, ...
} @ args:
assert (!args ? pname || !args ? version) -> (args ? name); # You must provide name if pname or version (preferred) is missing.
let
inherit (lib)
concatLines
concatStringsSep
escapeShellArgs
filter
optionalString
splitString
;
inherit (lib.attrsets) removeAttrs;
name = args.name or "${args.pname}-${args.version}";
executableName = args.pname or args.name;
# we don't know which have been supplied, and want to avoid defaulting missing attrs to null. Passed into runCommandLocal
nameAttrs = lib.filterAttrs (key: value: builtins.elem key [ "name" "pname" "version" ]) args;
buildFHSEnv = callPackage ./buildFHSEnv.nix { };
fhsenv = buildFHSEnv (removeAttrs args [
"runScript" "extraInstallCommands" "meta" "passthru" "extraPreBwrapCmds" "extraBwrapArgs" "dieWithParent"
"unshareUser" "unshareCgroup" "unshareUts" "unshareNet" "unsharePid" "unshareIpc" "privateTmp"
]);
etcBindEntries = let
files = [
# NixOS Compatibility
"static"
"nix" # mainly for nixUnstable users, but also for access to nix/netrc
# Shells
"shells"
"bashrc"
"zshenv"
"zshrc"
"zinputrc"
"zprofile"
# Users, Groups, NSS
"passwd"
"group"
"shadow"
"hosts"
"resolv.conf"
"nsswitch.conf"
# User profiles
"profiles"
# Sudo & Su
"login.defs"
"sudoers"
"sudoers.d"
# Time
"localtime"
"zoneinfo"
# Other Core Stuff
"machine-id"
"os-release"
# PAM
"pam.d"
# Fonts
"fonts"
# ALSA
"alsa"
"asound.conf"
# SSL
"ssl/certs"
"ca-certificates"
"pki"
];
in map (path: "/etc/${path}") files;
# Create this on the fly instead of linking from /nix
# The container might have to modify it and re-run ldconfig if there are
# issues running some binary with LD_LIBRARY_PATH
createLdConfCache = ''
cat > /etc/ld.so.conf <<EOF
/lib
/lib/x86_64-linux-gnu
/lib64
/usr/lib
/usr/lib/x86_64-linux-gnu
/usr/lib64
/lib/i386-linux-gnu
/lib32
/usr/lib/i386-linux-gnu
/usr/lib32
/run/opengl-driver/lib
/run/opengl-driver-32/lib
EOF
ldconfig &> /dev/null
'';
init = run: writeShellScript "${name}-init" ''
source /etc/profile
${createLdConfCache}
exec ${run} "$@"
'';
indentLines = str: concatLines (map (s: " " + s) (filter (s: s != "") (splitString "\n" str)));
bwrapCmd = { initArgs ? "" }: ''
${extraPreBwrapCmds}
ignored=(/nix /dev /proc /etc ${optionalString privateTmp "/tmp"})
ro_mounts=()
symlinks=()
etc_ignored=()
# loop through all entries of root in the fhs environment, except its /etc.
for i in ${fhsenv}/*; do
path="/''${i##*/}"
if [[ $path == '/etc' ]]; then
:
elif [[ -L $i ]]; then
symlinks+=(--symlink "$(${coreutils}/bin/readlink "$i")" "$path")
ignored+=("$path")
else
ro_mounts+=(--ro-bind "$i" "$path")
ignored+=("$path")
fi
done
# loop through the entries of /etc in the fhs environment.
if [[ -d ${fhsenv}/etc ]]; then
for i in ${fhsenv}/etc/*; do
path="/''${i##*/}"
# NOTE: we're binding /etc/fonts and /etc/ssl/certs from the host so we
# don't want to override it with a path from the FHS environment.
if [[ $path == '/fonts' || $path == '/ssl' ]]; then
continue
fi
if [[ -L $i ]]; then
symlinks+=(--symlink "$i" "/etc$path")
else
ro_mounts+=(--ro-bind "$i" "/etc$path")
fi
etc_ignored+=("/etc$path")
done
fi
# propagate /etc from the actual host if nested
if [[ -e /.host-etc ]]; then
ro_mounts+=(--ro-bind /.host-etc /.host-etc)
else
ro_mounts+=(--ro-bind /etc /.host-etc)
fi
# link selected etc entries from the actual root
for i in ${escapeShellArgs etcBindEntries}; do
if [[ "''${etc_ignored[@]}" =~ "$i" ]]; then
continue
fi
if [[ -e $i ]]; then
symlinks+=(--symlink "/.host-etc/''${i#/etc/}" "$i")
fi
done
declare -a auto_mounts
# loop through all directories in the root
for dir in /*; do
# if it is a directory and it is not ignored
if [[ -d "$dir" ]] && [[ ! "''${ignored[@]}" =~ "$dir" ]]; then
# add it to the mount list
auto_mounts+=(--bind "$dir" "$dir")
fi
done
declare -a x11_args
# Always mount a tmpfs on /tmp/.X11-unix
# Rationale: https://github.com/flatpak/flatpak/blob/be2de97e862e5ca223da40a895e54e7bf24dbfb9/common/flatpak-run.c#L277
x11_args+=(--tmpfs /tmp/.X11-unix)
# Try to guess X socket path. This doesn't cover _everything_, but it covers some things.
if [[ "$DISPLAY" == :* ]]; then
display_nr=''${DISPLAY#?}
local_socket=/tmp/.X11-unix/X$display_nr
x11_args+=(--ro-bind-try "$local_socket" "$local_socket")
fi
${optionalString privateTmp ''
# sddm places XAUTHORITY in /tmp
if [[ "$XAUTHORITY" == /tmp/* ]]; then
x11_args+=(--ro-bind-try "$XAUTHORITY" "$XAUTHORITY")
fi
# dbus-run-session puts the socket in /tmp
IFS=";" read -ra addrs <<<"$DBUS_SESSION_BUS_ADDRESS"
for addr in "''${addrs[@]}"; do
[[ "$addr" == unix:* ]] || continue
IFS="," read -ra parts <<<"''${addr#unix:}"
for part in "''${parts[@]}"; do
printf -v part '%s' "''${part//\\/\\\\}"
printf -v part '%b' "''${part//%/\\x}"
[[ "$part" == path=/tmp/* ]] || continue
x11_args+=(--ro-bind-try "''${part#path=}" "''${part#path=}")
done
done
''}
cmd=(
${bubblewrap}/bin/bwrap
--dev-bind /dev /dev
--proc /proc
--chdir "$(pwd)"
${optionalString unshareUser "--unshare-user"}
${optionalString unshareIpc "--unshare-ipc"}
${optionalString unsharePid "--unshare-pid"}
${optionalString unshareNet "--unshare-net"}
${optionalString unshareUts "--unshare-uts"}
${optionalString unshareCgroup "--unshare-cgroup"}
${optionalString dieWithParent "--die-with-parent"}
--ro-bind /nix /nix
${optionalString privateTmp "--tmpfs /tmp"}
# Our glibc will look for the cache in its own path in `/nix/store`.
# As such, we need a cache to exist there, because pressure-vessel
# depends on the existence of an ld cache. However, adding one
# globally proved to be a bad idea (see #100655), the solution we
# settled on being mounting one via bwrap.
# Also, the cache needs to go to both 32 and 64 bit glibcs, for games
# of both architectures to work.
--tmpfs ${glibc}/etc \
--tmpfs /etc \
--symlink /etc/ld.so.conf ${glibc}/etc/ld.so.conf \
--symlink /etc/ld.so.cache ${glibc}/etc/ld.so.cache \
--ro-bind ${glibc}/etc/rpc ${glibc}/etc/rpc \
--remount-ro ${glibc}/etc \
'' + optionalString fhsenv.isMultiBuild (indentLines ''
--tmpfs ${pkgsi686Linux.glibc}/etc \
--symlink /etc/ld.so.conf ${pkgsi686Linux.glibc}/etc/ld.so.conf \
--symlink /etc/ld.so.cache ${pkgsi686Linux.glibc}/etc/ld.so.cache \
--ro-bind ${pkgsi686Linux.glibc}/etc/rpc ${pkgsi686Linux.glibc}/etc/rpc \
--remount-ro ${pkgsi686Linux.glibc}/etc \
'') + ''
"''${ro_mounts[@]}"
"''${symlinks[@]}"
"''${auto_mounts[@]}"
"''${x11_args[@]}"
${concatStringsSep "\n " extraBwrapArgs}
${init runScript} ${initArgs}
)
exec "''${cmd[@]}"
'';
bin = writeShellScript "${name}-bwrap" (bwrapCmd { initArgs = ''"$@"''; });
in runCommandLocal name (nameAttrs // {
inherit meta;
passthru = passthru // {
env = runCommandLocal "${name}-shell-env" {
shellHook = bwrapCmd {};
} ''
echo >&2 ""
echo >&2 "*** User chroot 'env' attributes are intended for interactive nix-shell sessions, not for building! ***"
echo >&2 ""
exit 1
'';
inherit args fhsenv;
};
}) ''
mkdir -p $out/bin
ln -s ${bin} $out/bin/${executableName}
${extraInstallCommands}
''
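For orientation, a hedged sketch of a typical call of this bubblewrap-based
wrapper (the name and packages are hypothetical):

  buildFHSEnv {
    name = "example-env";
    targetPkgs = pkgs: [ pkgs.coreutils ];
    runScript = "bash";
    unsharePid = true;
  }

The result is a launcher at $out/bin/example-env that assembles the bwrap
command line above and then runs runScript inside the sandbox.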

View file

@ -0,0 +1,16 @@
{ lib, stdenv, meson, ninja, pkg-config, glib }:
stdenv.mkDerivation {
name = "chrootenv";
src = ./src;
nativeBuildInputs = [ meson ninja pkg-config ];
buildInputs = [ glib ];
meta = with lib; {
description = "Setup mount/user namespace for FHS emulation";
license = licenses.mit;
maintainers = with maintainers; [ yana ];
platforms = platforms.linux;
};
}

View file

@ -0,0 +1,169 @@
#define _GNU_SOURCE
#include <glib.h>
#include <glib/gstdio.h>
#include <errno.h>
#include <sched.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#define fail(s, err) g_error("%s: %s: %s", __func__, s, g_strerror(err))
#define fail_if(expr) \
if (expr) \
fail(#expr, errno);
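/* Top-level directories that the FHS environment provides itself (or that
   this wrapper manages specially), so they are never bind-mounted in from
   the host root. */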
const gchar *bind_blacklist[] = {"bin", "etc", "host", "real-host", "usr", "lib", "lib64", "lib32", "sbin", "opt", NULL};
int pivot_root(const char *new_root, const char *put_old) {
return syscall(SYS_pivot_root, new_root, put_old);
}
void mount_tmpfs(const gchar *target) {
fail_if(mount("none", target, "tmpfs", 0, NULL));
}
void bind_mount(const gchar *source, const gchar *target) {
fail_if(g_mkdir(target, 0755));
fail_if(mount(source, target, NULL, MS_BIND | MS_REC, NULL));
}
const gchar *create_tmpdir() {
gchar *prefix =
g_build_filename(g_get_tmp_dir(), "chrootenvXXXXXX", NULL);
fail_if(!g_mkdtemp_full(prefix, 0755));
return prefix;
}
void pivot_host(const gchar *guest) {
g_autofree gchar *point = g_build_filename(guest, "host", NULL);
fail_if(g_mkdir(point, 0755));
fail_if(pivot_root(guest, point));
}
void bind_mount_item(const gchar *host, const gchar *guest, const gchar *name) {
g_autofree gchar *source = g_build_filename(host, name, NULL);
g_autofree gchar *target = g_build_filename(guest, name, NULL);
if (G_LIKELY(g_file_test(source, G_FILE_TEST_IS_DIR)))
bind_mount(source, target);
}
void bind(const gchar *host, const gchar *guest) {
mount_tmpfs(guest);
pivot_host(guest);
g_autofree gchar *host_dir = g_build_filename("/host", host, NULL);
g_autoptr(GError) err = NULL;
g_autoptr(GDir) dir = g_dir_open(host_dir, 0, &err);
if (err != NULL)
fail("g_dir_open", errno);
const gchar *item;
while ((item = g_dir_read_name(dir)))
if (!g_strv_contains(bind_blacklist, item))
bind_mount_item(host_dir, "/", item);
}
void spit(const char *path, char *fmt, ...) {
va_list args;
va_start(args, fmt);
FILE *f = g_fopen(path, "w");
if (f == NULL)
fail("g_fopen", errno);
  g_vfprintf(f, fmt, args);
  va_end(args);
  fclose(f);
}
int main(gint argc, gchar **argv) {
const gchar *self = *argv++;
if (argc < 2) {
g_message("%s command [arguments...]", self);
return 1;
}
g_autofree const gchar *prefix = create_tmpdir();
pid_t cpid = fork();
if (cpid < 0)
fail("fork", errno);
else if (cpid == 0) {
uid_t uid = getuid();
gid_t gid = getgid();
int namespaces = CLONE_NEWNS;
if (uid != 0) {
namespaces |= CLONE_NEWUSER;
}
if (unshare(namespaces) < 0) {
int unshare_errno = errno;
g_message("Requires Linux version >= 3.19 built with CONFIG_USER_NS");
if (g_file_test("/proc/sys/kernel/unprivileged_userns_clone",
G_FILE_TEST_EXISTS))
g_message("Run: sudo sysctl -w kernel.unprivileged_userns_clone=1");
fail("unshare", unshare_errno);
}
// hide all mounts we do from the parent
fail_if(mount(0, "/", 0, MS_SLAVE | MS_REC, 0));
if (uid != 0) {
spit("/proc/self/setgroups", "deny");
spit("/proc/self/uid_map", "%d %d 1", uid, uid);
spit("/proc/self/gid_map", "%d %d 1", gid, gid);
}
// If there is a /host directory, assume this is nested chrootenv and use it as host instead.
gboolean nested_host = g_file_test("/host", G_FILE_TEST_EXISTS | G_FILE_TEST_IS_DIR);
g_autofree const gchar *host = nested_host ? "/host" : "/";
bind(host, prefix);
// Replace /host by an actual (inner) /host.
if (nested_host) {
fail_if(g_mkdir("/real-host", 0755));
fail_if(mount("/host/host", "/real-host", NULL, MS_BIND | MS_REC, NULL));
// For some reason umount("/host") returns EBUSY even immediately after
// pivot_root. We detach it at least to keep `/proc/mounts` from blowing
// up in nested cases.
fail_if(umount2("/host", MNT_DETACH));
fail_if(mount("/real-host", "/host", NULL, MS_MOVE, NULL));
fail_if(rmdir("/real-host"));
}
fail_if(chdir("/"));
fail_if(execvp(*argv, argv));
}
else {
int status;
fail_if(waitpid(cpid, &status, 0) != cpid);
fail_if(rmdir(prefix));
if (WIFEXITED(status))
return WEXITSTATUS(status);
else if (WIFSIGNALED(status))
kill(getpid(), WTERMSIG(status));
return 1;
}
}

View file

@ -0,0 +1,5 @@
project('chrootenv', 'c')
glib = dependency('glib-2.0')
executable('chrootenv', 'chrootenv.c', dependencies: [glib], install: true)

View file

@ -0,0 +1,53 @@
{ lib, callPackage, runCommandLocal, writeScript, stdenv, coreutils }:
let buildFHSEnv = callPackage ./env.nix { }; in
args@{ name, version ? null, runScript ? "bash", extraInstallCommands ? "", meta ? {}, passthru ? {}, ... }:
let
env = buildFHSEnv (removeAttrs args [ "version" "runScript" "extraInstallCommands" "meta" "passthru" ]);
chrootenv = callPackage ./chrootenv {};
init = run: writeScript "${name}-init" ''
#! ${stdenv.shell}
for i in ${env}/* /host/*; do
path="/''${i##*/}"
[ -e "$path" ] || ${coreutils}/bin/ln -s "$i" "$path"
done
[ -d "$1" ] && [ -r "$1" ] && cd "$1"
shift
source /etc/profile
exec ${run} "$@"
'';
versionStr = lib.optionalString (version != null) ("-" + version);
nameAndVersion = name + versionStr;
in runCommandLocal nameAndVersion {
inherit meta;
passthru = passthru // {
env = runCommandLocal "${name}-shell-env" {
shellHook = ''
exec ${chrootenv}/bin/chrootenv ${init runScript} "$(pwd)"
'';
} ''
echo >&2 ""
echo >&2 "*** User chroot 'env' attributes are intended for interactive nix-shell sessions, not for building! ***"
echo >&2 ""
exit 1
'';
};
} ''
mkdir -p $out/bin
cat <<EOF >$out/bin/${name}
#! ${stdenv.shell}
exec ${chrootenv}/bin/chrootenv ${init runScript} "\$(pwd)" "\$@"
EOF
chmod +x $out/bin/${name}
${extraInstallCommands}
''

View file

@ -0,0 +1,259 @@
{ stdenv, lib, buildEnv, writeText, pkgs, pkgsi686Linux }:
{ name
, profile ? ""
, targetPkgs ? pkgs: []
, multiPkgs ? pkgs: []
, extraBuildCommands ? ""
, extraBuildCommandsMulti ? ""
, extraOutputsToInstall ? []
}:
# HOWTO:
# All packages (most likely programs) returned from targetPkgs will only be
# installed once--matching the host's architecture (64bit on x86_64 and 32bit on
# x86).
#
# Packages (most likely libraries) returned from multiPkgs are installed
# once on x86 systems and twice on x86_64 systems.
# On x86 they are merged with packages from targetPkgs.
# On x86_64 they are added to targetPkgs and in addition their 32bit
# versions are also installed. The final directory structure looks as
# follows:
# /lib32 will include 32bit libraries from multiPkgs
# /lib64 will include 64bit libraries from multiPkgs and targetPkgs
# /lib will link to /lib32
let
is64Bit = stdenv.hostPlatform.parsed.cpu.bits == 64;
# multi-lib glibc is only supported on x86_64
isMultiBuild = multiPkgs != null && stdenv.hostPlatform.system == "x86_64-linux";
isTargetBuild = !isMultiBuild;
  # list of packages (usually programs) which are only installed for the
  # host's architecture
targetPaths = targetPkgs pkgs ++ (if multiPkgs == null then [] else multiPkgs pkgs);
# list of packages which are installed for both x86 and x86_64 on x86_64
# systems
multiPaths = multiPkgs pkgsi686Linux;
# base packages of the chroot
# these match the host's architecture, glibc_multi is used for multilib
  # builds. glibcLocales must be before glibc or glibc_multi as otherwise
# the wrong LOCALE_ARCHIVE will be used where only C.UTF-8 is available.
basePkgs = with pkgs;
[ glibcLocales
(if isMultiBuild then glibc_multi else glibc)
(toString gcc.cc.lib) bashInteractiveFHS coreutils less shadow su
gawk diffutils findutils gnused gnugrep
gnutar gzip bzip2 xz
];
baseMultiPkgs = with pkgsi686Linux;
[ (toString gcc.cc.lib)
];
etcProfile = writeText "profile" ''
export PS1='${name}-chrootenv:\u@\h:\w\$ '
export LOCALE_ARCHIVE='/usr/lib/locale/locale-archive'
export LD_LIBRARY_PATH="/run/opengl-driver/lib:/run/opengl-driver-32/lib:/usr/lib:/usr/lib32''${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH"
export PATH="/run/wrappers/bin:/usr/bin:/usr/sbin:$PATH"
export TZDIR='/etc/zoneinfo'
# XDG_DATA_DIRS is used by pressure-vessel (steam proton) and vulkan loaders to find the corresponding icd
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/run/opengl-driver/share:/run/opengl-driver-32/share
# Following XDG spec [1], XDG_DATA_DIRS should default to "/usr/local/share:/usr/share".
# In nix, it is commonly set without containing these values, so we add them as fallback.
#
# [1] <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>
case ":$XDG_DATA_DIRS:" in
*:/usr/local/share:*) ;;
*) export XDG_DATA_DIRS="$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/usr/local/share" ;;
esac
case ":$XDG_DATA_DIRS:" in
*:/usr/share:*) ;;
*) export XDG_DATA_DIRS="$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/usr/share" ;;
esac
# Force compilers and other tools to look in default search paths
unset NIX_ENFORCE_PURITY
export NIX_BINTOOLS_WRAPPER_TARGET_HOST_${stdenv.cc.suffixSalt}=1
export NIX_CC_WRAPPER_TARGET_HOST_${stdenv.cc.suffixSalt}=1
export NIX_CFLAGS_COMPILE='-idirafter /usr/include'
export NIX_CFLAGS_LINK='-L/usr/lib -L/usr/lib32'
export NIX_LDFLAGS='-L/usr/lib -L/usr/lib32'
export PKG_CONFIG_PATH=/usr/lib/pkgconfig
export ACLOCAL_PATH=/usr/share/aclocal
${profile}
'';
# Compose /etc for the chroot environment
etcPkg = stdenv.mkDerivation {
name = "${name}-chrootenv-etc";
buildCommand = ''
mkdir -p $out/etc
cd $out/etc
# environment variables
ln -s ${etcProfile} profile
# compatibility with NixOS
ln -s /host/etc/static static
# symlink nix config
ln -s /host/etc/nix nix
# symlink some NSS stuff
ln -s /host/etc/passwd passwd
ln -s /host/etc/group group
ln -s /host/etc/shadow shadow
ln -s /host/etc/hosts hosts
ln -s /host/etc/resolv.conf resolv.conf
ln -s /host/etc/nsswitch.conf nsswitch.conf
# symlink user profiles
ln -s /host/etc/profiles profiles
# symlink sudo and su stuff
ln -s /host/etc/login.defs login.defs
ln -s /host/etc/sudoers sudoers
ln -s /host/etc/sudoers.d sudoers.d
# symlink other core stuff
ln -s /host/etc/localtime localtime
ln -s /host/etc/zoneinfo zoneinfo
ln -s /host/etc/machine-id machine-id
ln -s /host/etc/os-release os-release
# symlink PAM stuff
ln -s /host/etc/pam.d pam.d
# symlink fonts stuff
ln -s /host/etc/fonts fonts
# symlink ALSA stuff
ln -s /host/etc/asound.conf asound.conf
ln -s /host/etc/alsa alsa
# symlink SSL certs
mkdir -p ssl
ln -s /host/etc/ssl/certs ssl/certs
# symlink /etc/mtab -> /proc/mounts (compat for old userspace progs)
ln -s /proc/mounts mtab
'';
};
# Composes a /usr-like directory structure
staticUsrProfileTarget = buildEnv {
name = "${name}-usr-target";
paths = [ etcPkg ] ++ basePkgs ++ targetPaths;
extraOutputsToInstall = [ "out" "lib" "bin" ] ++ extraOutputsToInstall;
ignoreCollisions = true;
postBuild = ''
if [[ -d $out/share/gsettings-schemas/ ]]; then
      # Recreate the standard schemas directory if it's a symlink, to make it writable
if [[ -L $out/share/glib-2.0 ]]; then
target=$(readlink $out/share/glib-2.0)
rm $out/share/glib-2.0
mkdir $out/share/glib-2.0
ln -fs $target/* $out/share/glib-2.0
fi
if [[ -L $out/share/glib-2.0/schemas ]]; then
target=$(readlink $out/share/glib-2.0/schemas)
rm $out/share/glib-2.0/schemas
mkdir $out/share/glib-2.0/schemas
ln -fs $target/* $out/share/glib-2.0/schemas
fi
mkdir -p $out/share/glib-2.0/schemas
for d in $out/share/gsettings-schemas/*; do
# Force symlink, in case there are duplicates
ln -fs $d/glib-2.0/schemas/*.xml $out/share/glib-2.0/schemas
ln -fs $d/glib-2.0/schemas/*.gschema.override $out/share/glib-2.0/schemas
done
# and compile them
${pkgs.glib.dev}/bin/glib-compile-schemas $out/share/glib-2.0/schemas
fi
'';
};
staticUsrProfileMulti = buildEnv {
name = "${name}-usr-multi";
paths = baseMultiPkgs ++ multiPaths;
extraOutputsToInstall = [ "out" "lib" ] ++ extraOutputsToInstall;
ignoreCollisions = true;
};
# setup library paths only for the targeted architecture
setupLibDirs_target = ''
# link content of targetPaths
cp -rsHf ${staticUsrProfileTarget}/lib lib
ln -s lib lib${if is64Bit then "64" else "32"}
'';
# setup /lib, /lib32 and /lib64
setupLibDirs_multi = ''
mkdir -m0755 lib32
mkdir -m0755 lib64
ln -s lib64 lib
# copy glibc stuff
cp -rsHf ${staticUsrProfileTarget}/lib/32/* lib32/ && chmod u+w -R lib32/
# copy content of multiPaths (32bit libs)
[ -d ${staticUsrProfileMulti}/lib ] && cp -rsHf ${staticUsrProfileMulti}/lib/* lib32/ && chmod u+w -R lib32/
# copy content of targetPaths (64bit libs)
cp -rsHf ${staticUsrProfileTarget}/lib/* lib64/ && chmod u+w -R lib64/
# symlink 32-bit ld-linux.so
ln -Ls ${staticUsrProfileTarget}/lib/32/ld-linux.so.2 lib/
'';
setupLibDirs = if isTargetBuild then setupLibDirs_target
else setupLibDirs_multi;
# the target profile is the actual profile that will be used for the chroot
setupTargetProfile = ''
mkdir -m0755 usr
cd usr
${setupLibDirs}
for i in bin sbin share include; do
if [ -d "${staticUsrProfileTarget}/$i" ]; then
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
fi
done
cd ..
for i in var etc opt; do
if [ -d "${staticUsrProfileTarget}/$i" ]; then
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
fi
done
for i in usr/{bin,sbin,lib,lib32,lib64}; do
if [ -d "$i" ]; then
ln -s "$i"
fi
done
'';
in stdenv.mkDerivation {
name = "${name}-fhs";
buildCommand = ''
mkdir -p $out
cd $out
${setupTargetProfile}
cd $out
${extraBuildCommands}
cd $out
${lib.optionalString isMultiBuild extraBuildCommandsMulti}
'';
preferLocalBuild = true;
allowSubstitutes = false;
}

View file

@ -0,0 +1,84 @@
{ lib
, stdenv
, glibcLocales
# The GraalVM derivation to use
, graalvmDrv
, removeReferencesTo
, executable ? args.pname
# JAR used as input for GraalVM derivation, defaults to src
, jar ? args.src
, dontUnpack ? (jar == args.src)
# Default native-image arguments. You probably don't want to set this,
# except in special cases. In most cases, use extraNativeBuildArgs instead
, nativeImageBuildArgs ? [
(lib.optionalString stdenv.isDarwin "-H:-CheckToolchain")
(lib.optionalString (stdenv.isLinux && stdenv.isAarch64) "-H:PageSize=64K")
"-H:Name=${executable}"
"-march=compatibility"
"--verbose"
]
# Extra arguments to be passed to the native-image
, extraNativeImageBuildArgs ? [ ]
# XMX size of GraalVM during build
, graalvmXmx ? "-J-Xmx6g"
, meta ? { }
, LC_ALL ? "en_US.UTF-8"
, ...
} @ args:
let
extraArgs = builtins.removeAttrs args [
"lib"
"stdenv"
"glibcLocales"
"jar"
"dontUnpack"
"LC_ALL"
"meta"
"buildPhase"
"nativeBuildInputs"
"installPhase"
"postInstall"
];
in
stdenv.mkDerivation ({
inherit dontUnpack jar;
env = { inherit LC_ALL; };
nativeBuildInputs = (args.nativeBuildInputs or [ ]) ++ [ graalvmDrv glibcLocales removeReferencesTo ];
nativeImageBuildArgs = nativeImageBuildArgs ++ extraNativeImageBuildArgs ++ [ graalvmXmx ];
buildPhase = args.buildPhase or ''
runHook preBuild
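    # The export/sed pipeline below turns every exported environment variable
    # name into a -E<name> flag for native-image, forwarding the build
    # environment (a reading of intent, not a documented contract).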
native-image -jar "$jar" $(export -p | sed -n 's/^declare -x \([^=]\+\)=.*$/ -E\1/p' | tr -d \\n) ''${nativeImageBuildArgs[@]}
runHook postBuild
'';
installPhase = args.installPhase or ''
runHook preInstall
install -Dm755 ${executable} -t $out/bin
runHook postInstall
'';
postInstall = ''
remove-references-to -t ${graalvmDrv} $out/bin/${executable}
${args.postInstall or ""}
'';
disallowedReferences = [ graalvmDrv ];
passthru = { inherit graalvmDrv; };
meta = {
# default to graalvm's platforms
platforms = graalvmDrv.meta.platforms;
# default to executable name
mainProgram = executable;
} // meta;
} // extraArgs)
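A sketch of a call site (names, URL, and hash are hypothetical):

  buildGraalvmNativeImage {
    pname = "example-cli";
    version = "0.1.0";
    src = fetchurl {
      url = "https://example.org/example-cli-0.1.0.jar";
      hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
    };
    graalvmDrv = graalvm-ce;
    extraNativeImageBuildArgs = [ "--no-fallback" ];
  }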

View file

@ -0,0 +1,88 @@
{ stdenv, maven, runCommand, writeText, fetchurl, lib, requireFile, linkFarm }:
# Takes an info file generated by mvn2nix
# (https://github.com/NixOS/mvn2nix-maven-plugin) and builds the maven
# project with it.
#
# repo: A local maven repository with the project's dependencies.
#
# settings: A settings.xml to pass to maven to use the repo.
#
# build: A simple build derivation that uses mvn compile and package to build
# the project.
#
# @example
#   project = pkgs.buildMaven ./project-info.json
#   jar = project.build
infoFile:
let
info = lib.importJSON infoFile;
dependencies = lib.flatten (map (dep:
let
inherit (dep) sha1 groupId artifactId version metadata repository-id;
versionDir = dep.unresolved-version or version;
authenticated = dep.authenticated or false;
url = dep.url or "";
fetch = if (url != "") then
((if authenticated then requireFile else fetchurl) {
inherit url sha1;
})
else
"";
fetchMetadata = (if authenticated then requireFile else fetchurl) {
inherit (metadata) url sha1;
};
layout = "${
builtins.replaceStrings [ "." ] [ "/" ] groupId
}/${artifactId}/${versionDir}";
in lib.optional (url != "") {
layout = "${layout}/${fetch.name}";
drv = fetch;
} ++ lib.optionals (dep ? metadata) ([{
layout = "${layout}/maven-metadata-${repository-id}.xml";
drv = fetchMetadata;
}] ++ lib.optional (fetch != "") {
layout = "${layout}/${
builtins.replaceStrings [ version ] [ dep.unresolved-version ]
fetch.name
}";
drv = fetch;
})) info.dependencies);
repo = linkFarm "maven-repository" (lib.forEach dependencies (dependency: {
name = dependency.layout;
path = dependency.drv;
}));
settings = writeText "settings.xml" ''
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
http://maven.apache.org/xsd/settings-1.0.0.xsd">
<localRepository>${repo}</localRepository>
</settings>
'';
src = dirOf infoFile;
in {
inherit repo settings info;
build = stdenv.mkDerivation {
name = "${info.project.artifactId}-${info.project.version}.jar";
src = builtins.filterSource (path: type:
(toString path) != (toString (src + "/target")) && (toString path)
!= (toString (src + "/.git"))) src;
buildInputs = [ maven ];
buildPhase = "mvn --offline --settings ${settings} compile";
installPhase = ''
mvn --offline --settings ${settings} package
mv target/*.jar $out
'';
};
}

View file

@ -0,0 +1,283 @@
#! @perl@ -w
use strict;
use Cwd 'abs_path';
use IO::Handle;
use File::Path;
use File::Basename;
use File::Compare;
use JSON::PP;
STDOUT->autoflush(1);
$SIG{__WARN__} = sub { warn "warning: ", @_ };
$SIG{__DIE__} = sub { die "error: ", @_ };
my $out = $ENV{"out"};
my $extraPrefix = $ENV{"extraPrefix"};
my @pathsToLink = split ' ', $ENV{"pathsToLink"};
sub isInPathsToLink {
my $path = shift;
$path = "/" if $path eq "";
foreach my $elem (@pathsToLink) {
return 1 if
$elem eq "/" ||
(substr($path, 0, length($elem)) eq $elem
&& (($path eq $elem) || (substr($path, length($elem), 1) eq "/")));
}
return 0;
}
# Returns whether a path in one of the linked packages may contain
# files in one of the elements of pathsToLink.
sub hasPathsToLink {
my $path = shift;
foreach my $elem (@pathsToLink) {
return 1 if
$path eq "" ||
(substr($elem, 0, length($path)) eq $path
&& (($path eq $elem) || (substr($elem, length($path), 1) eq "/")));
}
return 0;
}
# Similar to `lib.isStorePath`
sub isStorePath {
my $path = shift;
my $storePath = "@storeDir@";
return substr($path, 0, 1) eq "/" && dirname($path) eq $storePath;
}
# For each activated package, determine what symlinks to create.
my %symlinks;
# Add all pathsToLink and all parent directories.
#
# For "/a/b/c" that will include
# [ "", "/a", "/a/b", "/a/b/c" ]
#
# That ensures the whole directory tree needed by pathsToLink is
# created as directories and not symlinks.
$symlinks{""} = ["", 0];
for my $p (@pathsToLink) {
my @parts = split '/', $p;
my $cur = "";
for my $x (@parts) {
$cur = $cur . "/$x";
$cur = "" if $cur eq "/";
$symlinks{$cur} = ["", 0];
}
}
sub findFiles;
sub findFilesInDir {
my ($relName, $target, $ignoreCollisions, $checkCollisionContents, $priority) = @_;
opendir DIR, "$target" or die "cannot open `$target': $!";
my @names = readdir DIR or die;
closedir DIR;
foreach my $name (@names) {
next if $name eq "." || $name eq "..";
findFiles("$relName/$name", "$target/$name", $name, $ignoreCollisions, $checkCollisionContents, $priority);
}
}
sub checkCollision {
my ($path1, $path2) = @_;
if (! -e $path1 || ! -e $path2) {
return 0;
}
my $stat1 = (stat($path1))[2];
my $stat2 = (stat($path2))[2];
if ($stat1 != $stat2) {
warn "different permissions in `$path1' and `$path2': "
. sprintf("%04o", $stat1 & 07777) . " <-> "
. sprintf("%04o", $stat2 & 07777);
return 0;
}
return compare($path1, $path2) == 0;
}
sub prependDangling {
my $path = shift;
return (-l $path && ! -e $path ? "dangling symlink " : "") . "`$path'";
}
sub findFiles {
my ($relName, $target, $baseName, $ignoreCollisions, $checkCollisionContents, $priority) = @_;
# The store path must not be a file
if (-f $target && isStorePath $target) {
die "The store path $target is a file and can't be merged into an environment using pkgs.buildEnv!";
}
# Urgh, hacky...
return if
$relName eq "/propagated-build-inputs" ||
$relName eq "/nix-support" ||
$relName =~ /info\/dir$/ ||
( $relName =~ /^\/share\/mime\// && !( $relName =~ /^\/share\/mime\/packages/ ) ) ||
$baseName eq "perllocal.pod" ||
$baseName eq "log" ||
! (hasPathsToLink($relName) || isInPathsToLink($relName));
my ($oldTarget, $oldPriority) = @{$symlinks{$relName} // [undef, undef]};
# If target doesn't exist, create it. If it already exists as a
# symlink to a file (not a directory) in a lower-priority package,
# overwrite it.
if (!defined $oldTarget || ($priority < $oldPriority && ($oldTarget ne "" && ! -d $oldTarget))) {
# If target is a dangling symlink, emit a warning.
if (-l $target && ! -e $target) {
my $link = readlink $target;
warn "creating dangling symlink `$out$extraPrefix/$relName' -> `$target' -> `$link'\n";
}
$symlinks{$relName} = [$target, $priority];
return;
}
# If target already exists and both targets resolve to the same path, skip
if (
defined $oldTarget && $oldTarget ne "" &&
defined abs_path($target) && defined abs_path($oldTarget) &&
abs_path($target) eq abs_path($oldTarget)
) {
# Prefer the target that is not a symlink, if any
if (-l $oldTarget && ! -l $target) {
$symlinks{$relName} = [$target, $priority];
}
return;
}
# If target already exists as a symlink to a file (not a
# directory) in a higher-priority package, skip.
if (defined $oldTarget && $priority > $oldPriority && $oldTarget ne "" && ! -d $oldTarget) {
return;
}
# If target is supposed to be a directory but it isn't, die with an error message
# instead of attempting to recurse into it, only to fail then.
# This happens e.g. when pathsToLink contains a non-directory path.
if ($oldTarget eq "" && ! -d $target) {
die "not a directory: `$target'\n";
}
unless (-d $target && ($oldTarget eq "" || -d $oldTarget)) {
# Prepend "dangling symlink" to paths if applicable.
my $targetRef = prependDangling($target);
my $oldTargetRef = prependDangling($oldTarget);
if ($ignoreCollisions) {
warn "collision between $targetRef and $oldTargetRef\n" if $ignoreCollisions == 1;
return;
} elsif ($checkCollisionContents && checkCollision($oldTarget, $target)) {
return;
} else {
die "collision between $targetRef and $oldTargetRef\n";
}
}
findFilesInDir($relName, $oldTarget, $ignoreCollisions, $checkCollisionContents, $oldPriority) unless $oldTarget eq "";
findFilesInDir($relName, $target, $ignoreCollisions, $checkCollisionContents, $priority);
$symlinks{$relName} = ["", $priority]; # denotes directory
}
my %done;
my %postponed;
sub addPkg {
my ($pkgDir, $ignoreCollisions, $checkCollisionContents, $priority) = @_;
return if (defined $done{$pkgDir});
$done{$pkgDir} = 1;
findFiles("", $pkgDir, "", $ignoreCollisions, $checkCollisionContents, $priority);
my $propagatedFN = "$pkgDir/nix-support/propagated-user-env-packages";
if (-e $propagatedFN) {
open PROP, "<$propagatedFN" or die;
my $propagated = <PROP>;
close PROP;
my @propagated = split ' ', $propagated;
foreach my $p (@propagated) {
$postponed{$p} = 1 unless defined $done{$p};
}
}
}
# Read packages list.
my $pkgs;
if (exists $ENV{"pkgsPath"}) {
    open FILE, $ENV{"pkgsPath"} or die "cannot open `$ENV{pkgsPath}': $!";
    $pkgs = <FILE>;
    close FILE;
} else {
    $pkgs = $ENV{"pkgs"};
}
# Symlink to the packages that have been installed explicitly by the
# user.
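# The list is the JSON generated by buildEnv's Nix expression; illustratively:
#   [ { "paths": [ "/nix/store/...-hello" ], "priority": 5 }, ... ]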
for my $pkg (@{decode_json $pkgs}) {
for my $path (@{$pkg->{paths}}) {
addPkg($path,
$ENV{"ignoreCollisions"} eq "1",
$ENV{"checkCollisionContents"} eq "1",
$pkg->{priority})
if -e $path;
}
}
# Symlink to the packages that have been "propagated" by packages
# installed by the user (i.e., package X declares that it wants Y
# installed as well). We do these later because they have a lower
# priority in case of collisions.
my $priorityCounter = 1000; # don't care about collisions
while (scalar(keys %postponed) > 0) {
my @pkgDirs = keys %postponed;
%postponed = ();
foreach my $pkgDir (sort @pkgDirs) {
addPkg($pkgDir, 2, $ENV{"checkCollisionContents"} eq "1", $priorityCounter++);
}
}
# Create the symlinks.
my $nrLinks = 0;
foreach my $relName (sort keys %symlinks) {
my ($target, $priority) = @{$symlinks{$relName}};
my $abs = "$out" . "$extraPrefix" . "/$relName";
next unless isInPathsToLink $relName;
if ($target eq "") {
#print "creating directory $relName\n";
mkpath $abs or die "cannot create directory `$abs': $!";
} else {
#print "creating symlink $relName to $target\n";
        symlink $target, $abs
            or die "error creating link `$abs': $!";
$nrLinks++;
}
}
print STDERR "created $nrLinks symlinks in user environment\n";
my $manifest = $ENV{"manifest"};
if ($manifest) {
symlink($manifest, "$out/manifest") or die "cannot create manifest";
}

View file

@ -0,0 +1,81 @@
# buildEnv creates a tree of symlinks to the specified paths. This is
# a fork of the hardcoded buildEnv in the Nix distribution.
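#
# A minimal usage sketch (hypothetical package names, assuming `pkgs` and
# `buildEnv` are in scope):
#
#   buildEnv {
#     name = "my-tools";
#     paths = [ pkgs.hello pkgs.jq ];
#     pathsToLink = [ "/bin" "/share" ];
#   }
#
# The result is a derivation whose output links the /bin and /share entries
# of the listed packages into one tree.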
{ buildPackages, runCommand, lib, substituteAll }:
let
builder = substituteAll {
src = ./builder.pl;
inherit (builtins) storeDir;
};
in
lib.makeOverridable
({ name
, # The manifest file (if any). A symlink $out/manifest will be
# created to it.
manifest ? ""
, # The paths to symlink.
paths
, # Whether to ignore collisions or abort.
ignoreCollisions ? false
, # If there is a collision, check whether the contents and permissions match
# and only if not, throw a collision error.
checkCollisionContents ? true
, # The paths (relative to each element of `paths') that we want to
# symlink (e.g., ["/bin"]). Any file not inside any of the
# directories in the list is not symlinked.
pathsToLink ? ["/"]
, # The package outputs to include. By default, only the default
# output is included.
extraOutputsToInstall ? []
, # Root the result in directory "$out${extraPrefix}", e.g. "/share".
extraPrefix ? ""
, # Shell commands to run after building the symlink tree.
postBuild ? ""
# Additional inputs
, nativeBuildInputs ? [] # Handy e.g. if using makeWrapper in `postBuild`.
, buildInputs ? []
, passthru ? {}
, meta ? {}
}:
runCommand name
rec {
inherit manifest ignoreCollisions checkCollisionContents passthru
meta pathsToLink extraPrefix postBuild
nativeBuildInputs buildInputs;
pkgs = builtins.toJSON (map (drv: {
paths =
# First add the usual output(s): respect if user has chosen explicitly,
# and otherwise use `meta.outputsToInstall`. The attribute is guaranteed
# to exist in mkDerivation-created cases. The other cases (e.g. runCommand)
# aren't expected to have multiple outputs.
(if (! drv ? outputSpecified || ! drv.outputSpecified)
&& drv.meta.outputsToInstall or null != null
then map (outName: drv.${outName}) drv.meta.outputsToInstall
else [ drv ])
# Add any extra outputs specified by the caller of `buildEnv`.
++ lib.filter (p: p!=null)
(builtins.map (outName: drv.${outName} or null) extraOutputsToInstall);
priority = drv.meta.priority or 5;
}) paths);
preferLocalBuild = true;
allowSubstitutes = false;
# XXX: The size is somewhat arbitrary
passAsFile = if builtins.stringLength pkgs >= 128*1024 then [ "pkgs" ] else [ ];
}
''
${buildPackages.perl}/bin/perl -w ${builder}
eval "$postBuild"
'')

View file

@ -0,0 +1,11 @@
needsTarget=true
for p in "${params[@]}"; do
case "$p" in
-target | --target=*) needsTarget=false ;;
esac
done
if $needsTarget; then
extraBefore+=(-target @defaultTarget@ @march@)
fi

View file

@ -0,0 +1,87 @@
# N.B. It may be a surprise that the derivation-specific variables are exported,
# since this is just sourced by the wrapped binaries---the end consumers. This
# is because one wrapper binary may invoke another (e.g. cc invoking ld). In
# that case, it is cheaper/better to not repeat this step and let the forked
# wrapped binary just inherit the work of the forker's wrapper script.
var_templates_list=(
NIX_CFLAGS_COMPILE
NIX_CFLAGS_COMPILE_BEFORE
NIX_CFLAGS_LINK
NIX_CXXSTDLIB_COMPILE
NIX_CXXSTDLIB_LINK
NIX_GNATFLAGS_COMPILE
)
var_templates_bool=(
NIX_ENFORCE_NO_NATIVE
)
accumulateRoles
# We need to mangle names for hygiene, but also take parameters/overrides
# from the environment.
for var in "${var_templates_list[@]}"; do
mangleVarList "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done
for var in "${var_templates_bool[@]}"; do
mangleVarBool "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done
# `-B@out@/bin' forces cc to use ld-wrapper.sh when calling ld.
NIX_CFLAGS_COMPILE_@suffixSalt@="-B@out@/bin/ $NIX_CFLAGS_COMPILE_@suffixSalt@"
# Export and assign separately in order that a failing $(..) will fail
# the script.
# Currently bootstrap-tools does not split glibc and gcc files into
# separate directories. As a workaround we want resulting cflags to be
# ordered as: crt1-cflags libc-cflags cc-cflags. Otherwise we mix crt/libc.so
# from different libc as seen in
# https://github.com/NixOS/nixpkgs/issues/158042
#
# Note that below has reverse ordering as we prepend flags one-by-one.
# Once bootstrap-tools is split into different directories we can stop
# relying on flag ordering below.
if [ -e @out@/nix-support/cc-cflags ]; then
NIX_CFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/cc-cflags) $NIX_CFLAGS_COMPILE_@suffixSalt@"
fi
if [[ "$cInclude" = 1 ]] && [ -e @out@/nix-support/libc-cflags ]; then
NIX_CFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/libc-cflags) $NIX_CFLAGS_COMPILE_@suffixSalt@"
fi
if [ -e @out@/nix-support/libc-crt1-cflags ]; then
NIX_CFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/libc-crt1-cflags) $NIX_CFLAGS_COMPILE_@suffixSalt@"
fi
if [ -e @out@/nix-support/libcxx-cxxflags ]; then
NIX_CXXSTDLIB_COMPILE_@suffixSalt@+=" $(< @out@/nix-support/libcxx-cxxflags)"
fi
if [ -e @out@/nix-support/libcxx-ldflags ]; then
NIX_CXXSTDLIB_LINK_@suffixSalt@+=" $(< @out@/nix-support/libcxx-ldflags)"
fi
if [ -e @out@/nix-support/gnat-cflags ]; then
NIX_GNATFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/gnat-cflags) $NIX_GNATFLAGS_COMPILE_@suffixSalt@"
fi
if [ -e @out@/nix-support/cc-ldflags ]; then
NIX_LDFLAGS_@suffixSalt@+=" $(< @out@/nix-support/cc-ldflags)"
fi
if [ -e @out@/nix-support/cc-cflags-before ]; then
NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@="$(< @out@/nix-support/cc-cflags-before) $NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@"
fi
# Only add darwin min version flag if a default darwin min version is set,
# which is a signal that we're targeting darwin.
if [ "@darwinMinVersion@" ]; then
mangleVarSingle @darwinMinVersionVariable@ ${role_suffixes[@]+"${role_suffixes[@]}"}
NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@="-m@darwinPlatformForCC@-version-min=${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@} $NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@"
fi
# That way forked processes will not extend these environment variables again.
export NIX_CC_WRAPPER_FLAGS_SET_@suffixSalt@=1

View file

@ -0,0 +1,23 @@
# See add-flags.sh in cc-wrapper for comments.
var_templates_list=(
NIX_GNATMAKE_CARGS
)
accumulateRoles
for var in "${var_templates_list[@]}"; do
mangleVarList "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done
# `-B@out@/bin' forces cc to use the wrapped `as' instead of the system one.
NIX_GNATMAKE_CARGS_@suffixSalt@="$NIX_GNATMAKE_CARGS_@suffixSalt@ -B@out@/bin/"
# Only add darwin min version flag if a default darwin min version is set,
# which is a signal that we're targeting darwin.
if [ "@darwinMinVersion@" ]; then
mangleVarSingle @darwinMinVersionVariable@ ${role_suffixes[@]+"${role_suffixes[@]}"}
NIX_GNATMAKE_CARGS_@suffixSalt@="-m@darwinPlatformForCC@-version-min=${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@} $NIX_GNATMAKE_CARGS_@suffixSalt@"
fi
export NIX_GNAT_WRAPPER_EXTRA_FLAGS_SET_@suffixSalt@=1

View file

@ -0,0 +1,126 @@
declare -a hardeningCFlagsAfter=()
declare -a hardeningCFlagsBefore=()
declare -A hardeningEnableMap=()
# Intentionally word-split in case 'NIX_HARDENING_ENABLE' is defined in Nix. The
# array expansion also prevents undefined variables from causing trouble with
# `set -u`.
for flag in ${NIX_HARDENING_ENABLE_@suffixSalt@-}; do
hardeningEnableMap["$flag"]=1
done
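# Illustrative: a derivation exporting NIX_HARDENING_ENABLE="fortify pie"
# populates hardeningEnableMap with exactly those two keys here.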
# fortify3 implies fortify, so enable it explicitly before we filter
# unsupported flags: fortify3 being unsupported does not mean fortify
# should be dropped as well
if [[ -n "${hardeningEnableMap[fortify3]-}" ]]; then
hardeningEnableMap["fortify"]=1
fi
# Remove unsupported flags.
for flag in @hardening_unsupported_flags@; do
unset -v "hardeningEnableMap[$flag]"
# fortify being unsupported implies fortify3 is unsupported
if [[ "$flag" = 'fortify' ]] ; then
unset -v "hardeningEnableMap['fortify3']"
fi
done
# now make fortify and fortify3 mutually exclusive
if [[ -n "${hardeningEnableMap[fortify3]-}" ]]; then
unset -v "hardeningEnableMap['fortify']"
fi
if (( "${NIX_DEBUG:-0}" >= 1 )); then
declare -a allHardeningFlags=(fortify fortify3 stackprotector pie pic strictoverflow format trivialautovarinit zerocallusedregs)
declare -A hardeningDisableMap=()
# Determine which flags were effectively disabled so we can report below.
for flag in "${allHardeningFlags[@]}"; do
if [[ -z "${hardeningEnableMap[$flag]-}" ]]; then
hardeningDisableMap["$flag"]=1
fi
done
printf 'HARDENING: disabled flags:' >&2
(( "${#hardeningDisableMap[@]}" )) && printf ' %q' "${!hardeningDisableMap[@]}" >&2
echo >&2
if (( "${#hardeningEnableMap[@]}" )); then
echo 'HARDENING: Is active (not completely disabled with "all" flag)' >&2;
fi
fi
for flag in "${!hardeningEnableMap[@]}"; do
case $flag in
fortify | fortify3)
# Use -U_FORTIFY_SOURCE to avoid warnings on toolchains that explicitly
# set -D_FORTIFY_SOURCE=0 (like 'clang -fsanitize=address').
hardeningCFlagsBefore+=('-O2' '-U_FORTIFY_SOURCE')
# Unset any _FORTIFY_SOURCE values the command-line may have set before
# enforcing our own value, avoiding (potentially fatal) redefinition
# warnings
hardeningCFlagsAfter+=('-U_FORTIFY_SOURCE')
case $flag in
fortify)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling fortify >&2; fi
hardeningCFlagsAfter+=('-D_FORTIFY_SOURCE=2')
;;
fortify3)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling fortify3 >&2; fi
hardeningCFlagsAfter+=('-D_FORTIFY_SOURCE=3')
;;
*)
# Ignore unsupported.
;;
esac
;;
stackprotector)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling stackprotector >&2; fi
hardeningCFlagsBefore+=('-fstack-protector-strong' '--param' 'ssp-buffer-size=4')
;;
pie)
# NB: we do not use `+=` here, because PIE flags must occur before any PIC flags
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling CFlags -fPIE >&2; fi
hardeningCFlagsBefore=('-fPIE' "${hardeningCFlagsBefore[@]}")
if [[ ! (" ${params[*]} " =~ " -shared " || " ${params[*]} " =~ " -static ") ]]; then
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling LDFlags -pie >&2; fi
hardeningCFlagsBefore=('-pie' "${hardeningCFlagsBefore[@]}")
fi
;;
pic)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling pic >&2; fi
hardeningCFlagsBefore+=('-fPIC')
;;
strictoverflow)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling strictoverflow >&2; fi
if (( @isClang@ )); then
# In Clang, -fno-strict-overflow only serves to set -fwrapv and is
# reported as an unused CLI argument if -fwrapv or -fno-wrapv is set
# explicitly, so we side step that by doing the conversion here.
#
# See: https://github.com/llvm/llvm-project/blob/llvmorg-16.0.6/clang/lib/Driver/ToolChains/Clang.cpp#L6315
#
hardeningCFlagsBefore+=('-fwrapv')
else
hardeningCFlagsBefore+=('-fno-strict-overflow')
fi
;;
trivialautovarinit)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling trivialautovarinit >&2; fi
hardeningCFlagsBefore+=('-ftrivial-auto-var-init=pattern')
;;
format)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling format >&2; fi
hardeningCFlagsBefore+=('-Wformat' '-Wformat-security' '-Werror=format-security')
;;
zerocallusedregs)
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling zerocallusedregs >&2; fi
hardeningCFlagsBefore+=('-fzero-call-used-regs=used-gpr')
;;
*)
# Ignore unsupported. Checked in Nix that at least *some*
# tool supports each flag.
;;
esac
done

View file

@ -0,0 +1,261 @@
#! @shell@
set -eu -o pipefail +o posix
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
path_backup="$PATH"
# That @-vars are substituted separately from bash evaluation makes
# shellcheck think this, and others like it, are useless conditionals.
# shellcheck disable=SC2157
if [[ -n "@coreutils_bin@" && -n "@gnugrep_bin@" ]]; then
PATH="@coreutils_bin@/bin:@gnugrep_bin@/bin"
fi
source @out@/nix-support/utils.bash
# Parse command line options and set several variables.
# For instance, figure out if linker flags should be passed.
# GCC prints annoying warnings when they are not needed.
dontLink=0
nonFlagArgs=0
cc1=0
# shellcheck disable=SC2193
[[ "@prog@" = *++ ]] && isCxx=1 || isCxx=0
cxxInclude=1
cxxLibrary=1
cInclude=1
expandResponseParams "$@"
declare -ag positionalArgs=()
declare -i n=0
nParams=${#params[@]}
while (( "$n" < "$nParams" )); do
p=${params[n]}
p2=${params[n+1]:-} # handle `p` being last one
n+=1
case "$p" in
-[cSEM] | -MM) dontLink=1 ;;
-cc1) cc1=1 ;;
-nostdinc) cInclude=0 cxxInclude=0 ;;
-nostdinc++) cxxInclude=0 ;;
-nostdlib) cxxLibrary=0 ;;
-x*-header) dontLink=1 ;; # both `-x c-header` and `-xc-header` are accepted by clang
-xc++*) isCxx=1 ;; # both `-xc++` and `-x c++` are accepted by clang
-x)
case "$p2" in
*-header) dontLink=1 ;;
c++*) isCxx=1 ;;
esac
;;
--) # Everything else is positional args!
# See: https://github.com/llvm/llvm-project/commit/ed1d07282cc9d8e4c25d585e03e5c8a1b6f63a74
# Any positional arg (i.e. any argument after `--`) will be
# interpreted as a "non flag" arg:
if [[ -v "params[$n]" ]]; then nonFlagArgs=1; fi
positionalArgs=("${params[@]:$n}")
params=("${params[@]:0:$((n - 1))}")
break;
;;
-?*) ;;
*) nonFlagArgs=1 ;; # Includes a solitary dash (`-`) which signifies standard input; it is not a flag
esac
done
# If we pass a flag like -Wl, then gcc will call the linker unless it
# can figure out that it has to do something else (e.g., because of a
# "-c" flag). So if no non-flag arguments are given, don't pass any
# linker flags. This catches cases like "gcc" (should just print
# "gcc: no input files") and "gcc -v" (should print the version).
if [ "$nonFlagArgs" = 0 ]; then
dontLink=1
fi
# Optionally filter out paths not referring to the store.
if [[ "${NIX_ENFORCE_PURITY:-}" = 1 && -n "$NIX_STORE" ]]; then
kept=()
nParams=${#params[@]}
declare -i n=0
while (( "$n" < "$nParams" )); do
p=${params[n]}
p2=${params[n+1]:-} # handle `p` being last one
n+=1
skipNext=false
path=""
case "$p" in
-[IL]/*) path=${p:2} ;;
-[IL] | -isystem) path=$p2 skipNext=true ;;
esac
if [[ -n $path ]] && badPath "$path"; then
skip "$path"
$skipNext && n+=1
continue
fi
kept+=("$p")
done
# Old bash empty array hack
params=(${kept+"${kept[@]}"})
fi
# Flirting with a layer violation here.
if [ -z "${NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
source @bintools@/nix-support/add-flags.sh
fi
# Put this one second so libc ldflags take priority.
if [ -z "${NIX_CC_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
source @out@/nix-support/add-flags.sh
fi
# Clear march/mtune=native -- they bring impurity.
if [ "$NIX_ENFORCE_NO_NATIVE_@suffixSalt@" = 1 ]; then
kept=()
# Old bash empty array hack
for p in ${params+"${params[@]}"}; do
if [[ "$p" = -m*=native ]]; then
skip "$p"
else
kept+=("$p")
fi
done
# Old bash empty array hack
params=(${kept+"${kept[@]}"})
fi
if [[ "$isCxx" = 1 ]]; then
if [[ "$cxxInclude" = 1 ]]; then
#
# The motivation for this comment is to explain the reason for appending
# the C++ stdlib to NIX_CFLAGS_COMPILE, which I initially thought should
# change and later realized it shouldn't in:
#
# https://github.com/NixOS/nixpkgs/pull/185569#issuecomment-1234959249
#
# NIX_CFLAGS_COMPILE contains dependencies added using "-isystem", and
# NIX_CXXSTDLIB_COMPILE adds the C++ stdlib using "-isystem". Appending
# NIX_CXXSTDLIB_COMPILE to NIX_CFLAGS_COMPILE emulates this part of the
# include lookup order from GCC/Clang:
#
# > 4. Directories specified with -isystem options are scanned in
# > left-to-right order.
# > 5. Standard system directories are scanned.
# > 6. Directories specified with -idirafter options are scanned
# > in left-to-right order.
#
# NIX_CXXSTDLIB_COMPILE acts as the "standard system directories" that
# are otherwise missing from CC in nixpkgs, so should be added last.
#
# This means that the C standard library should never be present inside
# NIX_CFLAGS_COMPILE, because it MUST come after the C++ stdlib. It is
# added automatically by cc-wrapper later using "-idirafter".
#
NIX_CFLAGS_COMPILE_@suffixSalt@+=" $NIX_CXXSTDLIB_COMPILE_@suffixSalt@"
fi
if [[ "$cxxLibrary" = 1 ]]; then
NIX_CFLAGS_LINK_@suffixSalt@+=" $NIX_CXXSTDLIB_LINK_@suffixSalt@"
fi
fi
source @out@/nix-support/add-hardening.sh
# Add the flags for the C compiler proper.
extraAfter=(${hardeningCFlagsAfter[@]+"${hardeningCFlagsAfter[@]}"} $NIX_CFLAGS_COMPILE_@suffixSalt@)
extraBefore=(${hardeningCFlagsBefore[@]+"${hardeningCFlagsBefore[@]}"} $NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@)
if [ "$dontLink" != 1 ]; then
linkType=$(checkLinkType $NIX_LDFLAGS_BEFORE_@suffixSalt@ "${params[@]}" ${NIX_CFLAGS_LINK_@suffixSalt@:-} $NIX_LDFLAGS_@suffixSalt@)
# Add the flags that should only be passed to the compiler when
# linking.
extraAfter+=($(filterRpathFlags "$linkType" $NIX_CFLAGS_LINK_@suffixSalt@))
# Add the flags that should be passed to the linker (and prevent
# `ld-wrapper' from adding NIX_LDFLAGS_@suffixSalt@ again).
for i in $(filterRpathFlags "$linkType" $NIX_LDFLAGS_BEFORE_@suffixSalt@); do
extraBefore+=("-Wl,$i")
done
if [[ "$linkType" == dynamic && -n "$NIX_DYNAMIC_LINKER_@suffixSalt@" ]]; then
extraBefore+=("-Wl,-dynamic-linker=$NIX_DYNAMIC_LINKER_@suffixSalt@")
fi
for i in $(filterRpathFlags "$linkType" $NIX_LDFLAGS_@suffixSalt@); do
if [ "${i:0:3}" = -L/ ]; then
extraAfter+=("$i")
else
extraAfter+=("-Wl,$i")
fi
done
export NIX_LINK_TYPE_@suffixSalt@=$linkType
fi
if [[ -e @out@/nix-support/add-local-cc-cflags-before.sh ]]; then
source @out@/nix-support/add-local-cc-cflags-before.sh
fi
# As a very special hack, if the arguments are just `-v', then don't
# add anything. This is to prevent `gcc -v' (which normally prints
# out the version number and returns exit code 0) from printing out
# `No input files specified' and returning exit code 1.
if [ "$*" = -v ]; then
extraAfter=()
extraBefore=()
fi
# clang's -cc1 mode is not compatible with most options
# that we would pass. Rather than trying to pass only
# options that would work, let's just remove all of them.
if [ "$cc1" = 1 ]; then
extraAfter=()
extraBefore=()
fi
# Finally, if we got any positional args, append them to `extraAfter`
# now:
if [[ "${#positionalArgs[@]}" -gt 0 ]]; then
extraAfter+=(-- "${positionalArgs[@]}")
fi
# Optionally print debug info.
if (( "${NIX_DEBUG:-0}" >= 1 )); then
# Old bash workaround, see ld-wrapper for explanation.
echo "extra flags before to @prog@:" >&2
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
echo "original flags to @prog@:" >&2
printf " %q\n" ${params+"${params[@]}"} >&2
echo "extra flags after to @prog@:" >&2
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
fi
PATH="$path_backup"
# Old bash workaround, see above.
# if a cc-wrapper-hook exists, run it.
if [[ -e @out@/nix-support/cc-wrapper-hook ]]; then
compiler=@prog@
source @out@/nix-support/cc-wrapper-hook
fi
if (( "${NIX_CC_USE_RESPONSE_FILE:-@use_response_file_by_default@}" >= 1 )); then
responseFile=$(mktemp "${TMPDIR:-/tmp}/cc-params.XXXXXX")
trap 'rm -f -- "$responseFile"' EXIT
printf "%q\n" \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"} > "$responseFile"
@prog@ "@$responseFile"
else
exec @prog@ \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}
fi

View file

@ -0,0 +1,764 @@
# The Nixpkgs CC is not directly usable, since it doesn't know where
# the C library and standard header files are. Therefore the compiler
# produced by that package cannot be installed directly in a user
# environment and used from the command line. So we use a wrapper
# script that sets up the right environment variables so that the
# compiler and the linker just "work".
{ name ? ""
, lib
, stdenvNoCC
, runtimeShell
, cc ? null, libc ? null, bintools, coreutils ? null
, zlib ? null
, nativeTools, noLibc ? false, nativeLibc, nativePrefix ? ""
, propagateDoc ? cc != null && cc ? man
, extraTools ? [], extraPackages ? [], extraBuildCommands ? ""
, nixSupport ? {}
, isGNU ? false, isClang ? cc.isClang or false, isCcache ? cc.isCcache or false, gnugrep ? null
, expand-response-params
, libcxx ? null
# Whether or not to add `-B` and `-L` to `nix-support/cc-{c,ld}flags`
, useCcForLibs ?
# Always add these flags for Clang, because in order to compile (most
# software) it needs libraries that are shipped and compiled with gcc.
if isClang then true
# Never add these flags for a build!=host cross-compiler or a host!=target
# ("cross-built-native") compiler; currently nixpkgs has a special build
# path for these (`crossStageStatic`). Hopefully at some point that build
# path will be merged with this one and this conditional will be removed.
else if (with stdenvNoCC; buildPlatform != hostPlatform || hostPlatform != targetPlatform) then false
# Never add these flags when wrapping the bootstrapFiles' compiler; it has a
# /usr/-like layout with everything smashed into a single outpath, so it has
# no trouble finding its own libraries.
else if (cc.passthru.isFromBootstrapFiles or false) then false
# Add these flags when wrapping `xgcc` (the first compiler that nixpkgs builds)
else if (cc.passthru.isXgcc or false) then true
# Add these flags when wrapping `stdenv.cc`
else if (cc.stdenv.cc.cc.passthru.isXgcc or false) then true
# Do not add these flags in any other situation. This is `false` mainly to
# prevent these flags from being added when wrapping *old* versions of gcc
# (e.g. `gcc6Stdenv`), since they will cause the old gcc to get `-B` and
# `-L` flags pointing at the new gcc's libstdc++ headers. Example failure:
# https://hydra.nixos.org/build/213125495
else false
# the derivation at which the `-B` and `-L` flags added by `useCcForLibs` will point
, gccForLibs ? if useCcForLibs then cc else null
, fortify-headers ? null
, includeFortifyHeaders ? null
}:
assert nativeTools -> !propagateDoc && nativePrefix != "";
assert !nativeTools -> cc != null && coreutils != null && gnugrep != null;
assert !(nativeLibc && noLibc);
assert (noLibc || nativeLibc) == (libc == null);
let
inherit (lib)
attrByPath
concatMapStrings
concatStringsSep
escapeShellArg
getBin
getDev
getLib
getName
getVersion
mapAttrsToList
optional
optionalAttrs
optionals
optionalString
removePrefix
replaceStrings
toList
versionAtLeast
;
inherit (stdenvNoCC) hostPlatform targetPlatform;
includeFortifyHeaders' = if includeFortifyHeaders != null
then includeFortifyHeaders
else (targetPlatform.libc == "musl" && isGNU);
# Prefix for binaries. Customarily ends with a dash separator.
#
# TODO(@Ericson2314) Make unconditional, or optional but always true by default.
targetPrefix = optionalString (targetPlatform != hostPlatform) (targetPlatform.config + "-");
ccVersion = getVersion cc;
ccName = removePrefix targetPrefix (getName cc);
libc_bin = optionalString (libc != null) (getBin libc);
libc_dev = optionalString (libc != null) (getDev libc);
libc_lib = optionalString (libc != null) (getLib libc);
cc_solib = getLib cc
+ optionalString (targetPlatform != hostPlatform) "/${targetPlatform.config}";
# The wrapper scripts use 'cat' and 'grep', so we may need coreutils.
coreutils_bin = optionalString (!nativeTools) (getBin coreutils);
# The "suffix salt" is a arbitrary string added in the end of env vars
# defined by cc-wrapper's hooks so that multiple cc-wrappers can be used
# without interfering. For the moment, it is defined as the target triple,
# adjusted to be a valid bash identifier. This should be considered an
# unstable implementation detail, however.
suffixSalt = replaceStrings ["-" "."] ["_" "_"] targetPlatform.config;
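  # Illustrative: a targetPlatform.config of "x86_64-unknown-linux-gnu"
  # yields the salt "x86_64_unknown_linux_gnu".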
useGccForLibs = useCcForLibs
&& libcxx == null
&& !targetPlatform.isDarwin
&& !(targetPlatform.useLLVM or false)
&& !(targetPlatform.useAndroidPrebuilt or false)
&& !(targetPlatform.isiOS or false)
&& gccForLibs != null;
gccForLibs_solib = getLib gccForLibs
+ optionalString (targetPlatform != hostPlatform) "/${targetPlatform.config}";
# Analogously to cc_solib and gccForLibs_solib
libcxx_solib = "${getLib libcxx}/lib";
# The following two functions, `isGccArchSupported` and
# `isGccTuneSupported`, only handle those situations where a flag
# (`-march` or `-mtune`) is accepted by one compiler but rejected
# by another, and both compilers are relevant to nixpkgs. We are
# not trying to maintain a complete list of all flags accepted by
# all versions of all compilers ever in nixpkgs.
#
# The two main cases of interest are:
#
# - One compiler is gcc and the other is clang
# - One compiler is pkgs.gcc and the other is bootstrap-files.gcc
# -- older compilers (for example bootstrap's GCC 5) fail with
# -march=too-modern-cpu
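  # Illustrative, per the tables below: isGccArchSupported "x86-64-v3"
  # holds for GCC >= 11 and Clang >= 12, and fails for older versions.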
isGccArchSupported = arch:
if targetPlatform.isPower then false else # powerpc does not allow -march=
if isGNU then
{ # Generic
x86-64-v2 = versionAtLeast ccVersion "11.0";
x86-64-v3 = versionAtLeast ccVersion "11.0";
x86-64-v4 = versionAtLeast ccVersion "11.0";
# Intel
skylake = versionAtLeast ccVersion "6.0";
skylake-avx512 = versionAtLeast ccVersion "6.0";
cannonlake = versionAtLeast ccVersion "8.0";
icelake-client = versionAtLeast ccVersion "8.0";
icelake-server = versionAtLeast ccVersion "8.0";
cascadelake = versionAtLeast ccVersion "9.0";
cooperlake = versionAtLeast ccVersion "10.0";
tigerlake = versionAtLeast ccVersion "10.0";
knm = versionAtLeast ccVersion "8.0";
alderlake = versionAtLeast ccVersion "12.0";
# AMD
znver1 = versionAtLeast ccVersion "6.0";
znver2 = versionAtLeast ccVersion "9.0";
znver3 = versionAtLeast ccVersion "11.0";
znver4 = versionAtLeast ccVersion "13.0";
}.${arch} or true
else if isClang then
{ # Generic
x86-64-v2 = versionAtLeast ccVersion "12.0";
x86-64-v3 = versionAtLeast ccVersion "12.0";
x86-64-v4 = versionAtLeast ccVersion "12.0";
# Intel
cannonlake = versionAtLeast ccVersion "5.0";
icelake-client = versionAtLeast ccVersion "7.0";
icelake-server = versionAtLeast ccVersion "7.0";
knm = versionAtLeast ccVersion "7.0";
alderlake = versionAtLeast ccVersion "16.0";
# AMD
znver1 = versionAtLeast ccVersion "4.0";
znver2 = versionAtLeast ccVersion "9.0";
znver3 = versionAtLeast ccVersion "12.0";
znver4 = versionAtLeast ccVersion "16.0";
}.${arch} or true
else
false;
isGccTuneSupported = tune:
# for x86 -mtune= takes the same values as -march, plus two more:
if targetPlatform.isx86 then
{
generic = true;
intel = true;
}.${tune} or (isGccArchSupported tune)
# on arm64, the -mtune= values are specific processors
else if targetPlatform.isAarch64 then
(if isGNU then
{
cortex-a53 = versionAtLeast ccVersion "4.8"; # gcc 8c075f
cortex-a72 = versionAtLeast ccVersion "5.1"; # gcc d8f70d
"cortex-a72.cortex-a53" = versionAtLeast ccVersion "5.1"; # gcc d8f70d
}.${tune} or false
else if isClang then
{
cortex-a53 = versionAtLeast ccVersion "3.9"; # llvm dfc5d1
}.${tune} or false
else false)
else if targetPlatform.isPower then
# powerpc does not support -march
true
else if targetPlatform.isMips then
# for mips -mtune= takes the same values as -march
isGccArchSupported tune
else
false;
# Clang does not support as many `-mtune=` values as gcc does;
# this function will return the best possible approximation of the
# provided `-mtune=` value, or `null` if none exists.
#
# Note: this function can make use of ccVersion; for example, `if
# versionOlder ccVersion "12" then ...`
findBestTuneApproximation = tune:
let guess = if isClang
then {
# clang does not tune for big.LITTLE chips
"cortex-a72.cortex-a53" = "cortex-a72";
}.${tune} or tune
else tune;
in if isGccTuneSupported guess
then guess
else null;
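  # Illustrative: under clang the big.LITTLE value "cortex-a72.cortex-a53"
  # is first narrowed to "cortex-a72"; if the narrowed guess is still
  # unsupported, null is returned and no -mtune flag is emitted below.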
defaultHardeningFlags = bintools.defaultHardeningFlags or [];
# if cc.hardeningUnsupportedFlagsByTargetPlatform exists, this is
# called with the targetPlatform as an argument and
# cc.hardeningUnsupportedFlags is completely ignored - the function
# is responsible for including the constant hardeningUnsupportedFlags
# list however it sees fit.
ccHardeningUnsupportedFlags = if cc ? hardeningUnsupportedFlagsByTargetPlatform
then cc.hardeningUnsupportedFlagsByTargetPlatform targetPlatform
else (cc.hardeningUnsupportedFlags or []);
darwinPlatformForCC = optionalString targetPlatform.isDarwin (
if (targetPlatform.darwinPlatform == "macos" && isGNU) then "macosx"
else targetPlatform.darwinPlatform
);
darwinMinVersion = optionalString targetPlatform.isDarwin (
targetPlatform.darwinMinVersion
);
darwinMinVersionVariable = optionalString targetPlatform.isDarwin
targetPlatform.darwinMinVersionVariable;
in
assert includeFortifyHeaders' -> fortify-headers != null;
# Ensure bintools matches
assert libc_bin == bintools.libc_bin;
assert libc_dev == bintools.libc_dev;
assert libc_lib == bintools.libc_lib;
assert nativeTools == bintools.nativeTools;
assert nativeLibc == bintools.nativeLibc;
assert nativePrefix == bintools.nativePrefix;
stdenvNoCC.mkDerivation {
pname = targetPrefix
+ (if name != "" then name else "${ccName}-wrapper");
version = optionalString (cc != null) ccVersion;
preferLocalBuild = true;
outputs = [ "out" ] ++ optionals propagateDoc [ "man" "info" ];
passthru = {
inherit targetPrefix suffixSalt;
# "cc" is the generic name for a C compiler, but there is no one for package
# providing the linker and related tools. The two we use now are GNU
# Binutils, and Apple's "cctools"; "bintools" as an attempt to find an
# unused middle-ground name that evokes both.
inherit bintools;
inherit cc libc libcxx nativeTools nativeLibc nativePrefix isGNU isClang;
emacsBufferSetup = pkgs: ''
; We should handle propagation here too
(mapc
(lambda (arg)
(when (file-directory-p (concat arg "/include"))
(setenv "NIX_CFLAGS_COMPILE_${suffixSalt}" (concat (getenv "NIX_CFLAGS_COMPILE_${suffixSalt}") " -isystem " arg "/include"))))
'(${concatStringsSep " " (map (pkg: "\"${pkg}\"") pkgs)}))
'';
# Expose expand-response-params we are /actually/ using. In stdenv
# bootstrapping, expand-response-params usually comes from an earlier stage,
# so it is important to expose this for reference checking.
inherit expand-response-params;
inherit nixSupport;
inherit defaultHardeningFlags;
};
dontBuild = true;
dontConfigure = true;
enableParallelBuilding = true;
unpackPhase = ''
src=$PWD
'';
wrapper = ./cc-wrapper.sh;
installPhase =
''
mkdir -p $out/bin $out/nix-support
wrap() {
local dst="$1"
local wrapper="$2"
export prog="$3"
export use_response_file_by_default=${if isClang && !isCcache then "1" else "0"}
substituteAll "$wrapper" "$out/bin/$dst"
chmod +x "$out/bin/$dst"
}
''
+ (if nativeTools then ''
echo ${if targetPlatform.isDarwin then cc else nativePrefix} > $out/nix-support/orig-cc
ccPath="${if targetPlatform.isDarwin then cc else nativePrefix}/bin"
'' else ''
echo $cc > $out/nix-support/orig-cc
ccPath="${cc}/bin"
'')
# Create symlinks to everything in the bintools wrapper.
+ ''
for bbin in $bintools/bin/*; do
mkdir -p "$out/bin"
ln -s "$bbin" "$out/bin/$(basename $bbin)"
done
''
# We export environment variables pointing to the wrapped nonstandard
# cmds, lest some lousy configure script use those to guess compiler
# version.
+ ''
export named_cc=${targetPrefix}cc
export named_cxx=${targetPrefix}c++
if [ -e $ccPath/${targetPrefix}gcc ]; then
wrap ${targetPrefix}gcc $wrapper $ccPath/${targetPrefix}gcc
ln -s ${targetPrefix}gcc $out/bin/${targetPrefix}cc
export named_cc=${targetPrefix}gcc
export named_cxx=${targetPrefix}g++
elif [ -e $ccPath/clang ]; then
wrap ${targetPrefix}clang $wrapper $ccPath/clang
ln -s ${targetPrefix}clang $out/bin/${targetPrefix}cc
export named_cc=${targetPrefix}clang
export named_cxx=${targetPrefix}clang++
fi
if [ -e $ccPath/${targetPrefix}g++ ]; then
wrap ${targetPrefix}g++ $wrapper $ccPath/${targetPrefix}g++
ln -s ${targetPrefix}g++ $out/bin/${targetPrefix}c++
elif [ -e $ccPath/clang++ ]; then
wrap ${targetPrefix}clang++ $wrapper $ccPath/clang++
ln -s ${targetPrefix}clang++ $out/bin/${targetPrefix}c++
fi
if [ -e $ccPath/${targetPrefix}cpp ]; then
wrap ${targetPrefix}cpp $wrapper $ccPath/${targetPrefix}cpp
elif [ -e $ccPath/cpp ]; then
wrap ${targetPrefix}cpp $wrapper $ccPath/cpp
fi
''
# No need to wrap gnat, gnatkr, gnatname or gnatprep; we can just symlink them in
+ optionalString cc.langAda or false ''
for cmd in gnatbind gnatchop gnatclean gnatlink gnatls gnatmake; do
wrap ${targetPrefix}$cmd ${./gnat-wrapper.sh} $ccPath/${targetPrefix}$cmd
done
for cmd in gnat gnatkr gnatname gnatprep; do
ln -s $ccPath/${targetPrefix}$cmd $out/bin/${targetPrefix}$cmd
done
# this symlink points to the unwrapped gnat's output "out". It is used by
# our custom gprconfig compiler description to find GNAT's ada runtime. See
# ../../development/ada-modules/gprbuild/{boot.nix, nixpkgs-gnat.xml}
ln -sf ${cc} $out/nix-support/gprconfig-gnat-unwrapped
''
+ optionalString cc.langD or false ''
wrap ${targetPrefix}gdc $wrapper $ccPath/${targetPrefix}gdc
''
+ optionalString cc.langFortran or false ''
wrap ${targetPrefix}gfortran $wrapper $ccPath/${targetPrefix}gfortran
ln -sv ${targetPrefix}gfortran $out/bin/${targetPrefix}g77
ln -sv ${targetPrefix}gfortran $out/bin/${targetPrefix}f77
export named_fc=${targetPrefix}gfortran
''
+ optionalString cc.langJava or false ''
wrap ${targetPrefix}gcj $wrapper $ccPath/${targetPrefix}gcj
''
+ optionalString cc.langGo or false ''
wrap ${targetPrefix}gccgo $wrapper $ccPath/${targetPrefix}gccgo
wrap ${targetPrefix}go ${./go-wrapper.sh} $ccPath/${targetPrefix}go
'';
strictDeps = true;
propagatedBuildInputs = [ bintools ] ++ extraTools ++ optionals cc.langD or cc.langJava or false [ zlib ];
depsTargetTargetPropagated = optional (libcxx != null) libcxx ++ extraPackages;
setupHooks = [
../setup-hooks/role.bash
] ++ optional (cc.langC or true) ./setup-hook.sh
++ optional (cc.langFortran or false) ./fortran-hook.sh
++ optional (targetPlatform.isWindows) (stdenvNoCC.mkDerivation {
name = "win-dll-hook.sh";
dontUnpack = true;
installPhase = ''
echo addToSearchPath "LINK_DLL_FOLDERS" "${cc_solib}/lib" > $out
echo addToSearchPath "LINK_DLL_FOLDERS" "${cc_solib}/lib64" >> $out
echo addToSearchPath "LINK_DLL_FOLDERS" "${cc_solib}/lib32" >> $out
'';
});
postFixup =
# Ensure flags files exists, as some other programs cat them. (That these
# are considered an exposed interface is a bit dubious, but fine for now.)
''
touch "$out/nix-support/cc-cflags"
touch "$out/nix-support/cc-ldflags"
''
# Backwards compatibility for packages expecting this file, e.g. with
# `$NIX_CC/nix-support/dynamic-linker`.
#
# TODO(@Ericson2314): Remove this after stable release and force
# everyone to refer to bintools-wrapper directly.
+ ''
if [[ -f "$bintools/nix-support/dynamic-linker" ]]; then
ln -s "$bintools/nix-support/dynamic-linker" "$out/nix-support"
fi
if [[ -f "$bintools/nix-support/dynamic-linker-m32" ]]; then
ln -s "$bintools/nix-support/dynamic-linker-m32" "$out/nix-support"
fi
''
##
## GCC libs for non-GCC support
##
+ optionalString (useGccForLibs && isClang) ''
echo "-B${gccForLibs}/lib/gcc/${targetPlatform.config}/${gccForLibs.version}" >> $out/nix-support/cc-cflags
''
+ optionalString useGccForLibs ''
echo "-L${gccForLibs}/lib/gcc/${targetPlatform.config}/${gccForLibs.version}" >> $out/nix-support/cc-ldflags
echo "-L${gccForLibs_solib}/lib" >> $out/nix-support/cc-ldflags
''
# TODO We would like to connect this to `useGccForLibs`, but we cannot yet
# because `libcxxStdenv` on linux still needs this. Maybe someday we'll
# always set `useLLVM` on Darwin, and maybe also break down `useLLVM` into
# fine-grained use flags (libgcc vs compiler-rt, ld.lld vs legacy, libc++
# vs libstdc++, etc.) since Darwin isn't `useLLVM` on all counts. (See
# https://clang.llvm.org/docs/Toolchain.html for all the axes one might
# break `useLLVM` into.)
+ optionalString (isClang
&& targetPlatform.isLinux
&& !(targetPlatform.useAndroidPrebuilt or false)
&& !(targetPlatform.useLLVM or false)
&& gccForLibs != null) (''
echo "--gcc-toolchain=${gccForLibs}" >> $out/nix-support/cc-cflags
# Pull in 'cc.out' target to get 'libstdc++fs.a'. It should be in
# 'cc.lib'. But it's a gcc package bug.
# TODO(trofi): remove once gcc is fixed to move libraries to .lib output.
echo "-L${gccForLibs}/${optionalString (targetPlatform != hostPlatform) "/${targetPlatform.config}"}/lib" >> $out/nix-support/cc-ldflags
''
# this ensures that when clang passes -lgcc_s to lld (as it does
# when building e.g. firefox), lld is able to find libgcc_s.so
+ concatMapStrings (libgcc: ''
echo "-L${libgcc}/lib" >> $out/nix-support/cc-ldflags
'') (toList (gccForLibs.libgcc or [])))
##
## General libc support
##
# The "-B${libc_lib}/lib/" flag is a quick hack to force gcc to link
# against the crt1.o from our own glibc, rather than the one in
# /usr/lib. (This is only an issue when using an `impure'
# compiler/linker, i.e., one that searches /usr/lib and so on.)
#
# Unfortunately, setting -B appears to override the default search
# path. Thus, the gcc-specific "../includes-fixed" directory is
# no longer searched and glibc's <limits.h> header fails to
# compile, because it uses "#include_next <limits.h>" to find the
# limits.h file in ../includes-fixed. To remedy the problem,
# another -idirafter is necessary to add that directory again.
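  # Illustrative: with glibc as the libc, the lines below put
  # "-B${libc_lib}/lib/" into libc-crt1-cflags and
  # "-idirafter ${libc_dev}/include" into libc-cflags.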
+ optionalString (libc != null) (''
touch "$out/nix-support/libc-cflags"
touch "$out/nix-support/libc-ldflags"
echo "-B${libc_lib}${libc.libdir or "/lib/"}" >> $out/nix-support/libc-crt1-cflags
'' + optionalString (!(cc.langD or false)) ''
echo "-idirafter ${libc_dev}${libc.incdir or "/include"}" >> $out/nix-support/libc-cflags
'' + optionalString (isGNU && (!(cc.langD or false))) ''
for dir in "${cc}"/lib/gcc/*/*/include-fixed; do
echo '-idirafter' ''${dir} >> $out/nix-support/libc-cflags
done
'' + ''
echo "${libc_lib}" > $out/nix-support/orig-libc
echo "${libc_dev}" > $out/nix-support/orig-libc-dev
''
# fortify-headers is a set of wrapper headers that augment libc
# and use #include_next to pass through to libc's true
# implementations, so must appear before them in search order.
# in theory a correctly placed -idirafter could be used, but in
# practice the compiler may have been built with a --with-headers
# like option that forces the libc headers before all -idirafter,
# hence -isystem here.
+ optionalString includeFortifyHeaders' ''
echo "-isystem ${fortify-headers}/include" >> $out/nix-support/libc-cflags
'')
##
## General libc++ support
##
# We have a libc++ directly, we have one via "smuggled" GCC, or we have one
# bundled with the C compiler because it is GCC
+ optionalString (libcxx != null || (useGccForLibs && gccForLibs.langCC or false) || (isGNU && cc.langCC or false)) ''
touch "$out/nix-support/libcxx-cxxflags"
touch "$out/nix-support/libcxx-ldflags"
''
# Adding -isystem flags should be done only for clang; gcc
# already knows how to find its own libstdc++, and adding
# additional -isystem flags will confuse gfortran (see
# https://github.com/NixOS/nixpkgs/pull/209870#issuecomment-1500550903)
+ optionalString (libcxx == null && isClang && (useGccForLibs && gccForLibs.langCC or false)) ''
for dir in ${gccForLibs}/include/c++/*; do
echo "-isystem $dir" >> $out/nix-support/libcxx-cxxflags
done
for dir in ${gccForLibs}/include/c++/*/${targetPlatform.config}; do
echo "-isystem $dir" >> $out/nix-support/libcxx-cxxflags
done
''
+ optionalString (libcxx.isLLVM or false) ''
echo "-isystem ${getDev libcxx}/include/c++/v1" >> $out/nix-support/libcxx-cxxflags
echo "-stdlib=libc++" >> $out/nix-support/libcxx-ldflags
''
##
## Initial CFLAGS
##
# GCC shows ${cc_solib}/lib in `gcc -print-search-dirs', but not
# ${cc_solib}/lib64 (even though it does actually search there...).
# This confuses libtool. So add it to the compiler tool search
# path explicitly.
+ optionalString (!nativeTools) ''
if [ -e "${cc_solib}/lib64" -a ! -L "${cc_solib}/lib64" ]; then
ccLDFlags+=" -L${cc_solib}/lib64"
ccCFlags+=" -B${cc_solib}/lib64"
fi
ccLDFlags+=" -L${cc_solib}/lib"
ccCFlags+=" -B${cc_solib}/lib"
'' + optionalString cc.langAda or false ''
touch "$out/nix-support/gnat-cflags"
touch "$out/nix-support/gnat-ldflags"
basePath=$(echo $cc/lib/*/*/*)
ccCFlags+=" -B$basePath -I$basePath/adainclude"
gnatCFlags="-I$basePath/adainclude -I$basePath/adalib"
echo "$gnatCFlags" >> $out/nix-support/gnat-cflags
'' + ''
echo "$ccLDFlags" >> $out/nix-support/cc-ldflags
echo "$ccCFlags" >> $out/nix-support/cc-cflags
'' + optionalString (targetPlatform.isDarwin && (libcxx != null) && (cc.isClang or false)) ''
echo " -L${libcxx_solib}" >> $out/nix-support/cc-ldflags
''
##
## Man page and info support
##
+ optionalString propagateDoc ''
ln -s ${cc.man} $man
ln -s ${cc.info} $info
'' + optionalString (cc.langD or cc.langJava or false) ''
echo "-B${zlib}${zlib.libdir or "/lib/"}" >> $out/nix-support/libc-cflags
''
##
## Hardening support
##
+ ''
export hardening_unsupported_flags="${concatStringsSep " " ccHardeningUnsupportedFlags}"
''
# Machine flags. These are necessary for target-specific code generation.
# TODO: We should make a way to support miscellaneous machine
# flags and other gcc flags as well.
# Always add -march based on cpu in triple. Sometimes there is a
# discrepancy (x86_64 vs. x86-64), so we provide an "arch" arg in
# that case.
#
# For clang, this is handled in add-clang-cc-cflags-before.sh
# TODO: aarch64-darwin has mcpu incompatible with gcc
+ optionalString ((targetPlatform ? gcc.arch) && !isClang && !(targetPlatform.isDarwin && targetPlatform.isAarch64) &&
isGccArchSupported targetPlatform.gcc.arch) ''
echo "-march=${targetPlatform.gcc.arch}" >> $out/nix-support/cc-cflags-before
''
# -mcpu is not very useful, except on PowerPC where it is used
# instead of march. On all other platforms you should use mtune
# and march instead.
# TODO: aarch64-darwin has mcpu incompatible with gcc
+ optionalString ((targetPlatform ? gcc.cpu) && (isClang || !(targetPlatform.isDarwin && targetPlatform.isAarch64))) ''
echo "-mcpu=${targetPlatform.gcc.cpu}" >> $out/nix-support/cc-cflags-before
''
# -mfloat-abi only matters on arm32, but we set it here
# unconditionally just in case. If the ABI specifically selects hard
# vs. soft floats, we apply that choice here.
+ optionalString (targetPlatform ? gcc.float-abi) ''
echo "-mfloat-abi=${targetPlatform.gcc.float-abi}" >> $out/nix-support/cc-cflags-before
''
+ optionalString (targetPlatform ? gcc.fpu) ''
echo "-mfpu=${targetPlatform.gcc.fpu}" >> $out/nix-support/cc-cflags-before
''
+ optionalString (targetPlatform ? gcc.mode) ''
echo "-mmode=${targetPlatform.gcc.mode}" >> $out/nix-support/cc-cflags-before
''
+ optionalString (targetPlatform ? gcc.thumb) ''
echo "-m${if targetPlatform.gcc.thumb then "thumb" else "arm"}" >> $out/nix-support/cc-cflags-before
''
+ (let tune = if targetPlatform ? gcc.tune
then findBestTuneApproximation targetPlatform.gcc.tune
else null;
in optionalString (tune != null) ''
echo "-mtune=${tune}" >> $out/nix-support/cc-cflags-before
'')
# TODO: categorize these and figure out a better place for them
+ optionalString targetPlatform.isWindows ''
hardening_unsupported_flags+=" pic"
'' + optionalString targetPlatform.isMinGW ''
hardening_unsupported_flags+=" stackprotector fortify"
'' + optionalString targetPlatform.isAvr ''
hardening_unsupported_flags+=" stackprotector pic"
'' + optionalString (targetPlatform.libc == "newlib" || targetPlatform.libc == "newlib-nano") ''
hardening_unsupported_flags+=" stackprotector fortify pie pic"
'' + optionalString (targetPlatform.libc == "musl" && targetPlatform.isx86_32) ''
hardening_unsupported_flags+=" stackprotector"
'' + optionalString targetPlatform.isNetBSD ''
hardening_unsupported_flags+=" stackprotector fortify"
'' + optionalString cc.langAda or false ''
hardening_unsupported_flags+=" format stackprotector strictoverflow"
'' + optionalString cc.langD or false ''
hardening_unsupported_flags+=" format"
'' + optionalString cc.langFortran or false ''
hardening_unsupported_flags+=" format"
'' + optionalString targetPlatform.isWasm ''
hardening_unsupported_flags+=" stackprotector fortify pie pic"
'' + optionalString targetPlatform.isMicroBlaze ''
hardening_unsupported_flags+=" stackprotector"
''
+ optionalString (libc != null && targetPlatform.isAvr) ''
for isa in avr5 avr3 avr4 avr6 avr25 avr31 avr35 avr51 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7 tiny-stack; do
echo "-B${getLib libc}/avr/lib/$isa" >> $out/nix-support/libc-crt1-cflags
done
''
+ optionalString targetPlatform.isDarwin ''
echo "-arch ${targetPlatform.darwinArch}" >> $out/nix-support/cc-cflags
''
+ optionalString targetPlatform.isAndroid ''
echo "-D__ANDROID_API__=${targetPlatform.sdkVer}" >> $out/nix-support/cc-cflags
''
# There are a few tools (libstdcxx5, to name one) which do not work
# well with multi-line flags, so make the flags single-line again
+ ''
for flags in "$out/nix-support"/*flags*; do
substituteInPlace "$flags" --replace $'\n' ' '
done
substituteAll ${./add-flags.sh} $out/nix-support/add-flags.sh
substituteAll ${./add-hardening.sh} $out/nix-support/add-hardening.sh
substituteAll ${../wrapper-common/utils.bash} $out/nix-support/utils.bash
''
+ optionalString cc.langAda or false ''
substituteAll ${./add-gnat-extra-flags.sh} $out/nix-support/add-gnat-extra-flags.sh
''
##
## General Clang support
## Needs to go after ^ because the for loop eats \n and makes this file an invalid script
##
+ optionalString isClang ''
# Escape twice: once for this script, once for the one it gets substituted into.
export march=${escapeShellArg
(optionalString (targetPlatform ? gcc.arch)
(escapeShellArg "-march=${targetPlatform.gcc.arch}"))}
export defaultTarget=${targetPlatform.config}
substituteAll ${./add-clang-cc-cflags-before.sh} $out/nix-support/add-local-cc-cflags-before.sh
''
##
## Extra custom steps
##
+ extraBuildCommands
+ concatStringsSep "; "
(mapAttrsToList
(name: value: "echo ${toString value} >> $out/nix-support/${name}")
nixSupport);
env = {
inherit isClang;
# for substitution in utils.bash
# TODO(@sternenseemann): invent something cleaner than passing in "" in case of absence
expandResponseParams = "${expand-response-params}/bin/expand-response-params";
# TODO(@sternenseemann): rename env var via stdenv rebuild
shell = getBin runtimeShell + runtimeShell.shellPath or "";
gnugrep_bin = optionalString (!nativeTools) gnugrep;
# stdenv.cc.cc should not be null and we have nothing better for now.
# if the native impure bootstrap is gotten rid of this can become `inherit cc;` again.
cc = optionalString (!nativeTools) cc;
wrapperName = "CC_WRAPPER";
inherit suffixSalt coreutils_bin bintools;
inherit libc_bin libc_dev libc_lib;
inherit darwinPlatformForCC darwinMinVersion darwinMinVersionVariable;
default_hardening_flags_str = builtins.toString defaultHardeningFlags;
};
meta =
let cc_ = optionalAttrs (cc != null) cc; in
(optionalAttrs (cc_ ? meta) (removeAttrs cc.meta ["priority"])) //
{ description = attrByPath ["meta" "description"] "System C compiler" cc_ + " (wrapper script)";
priority = 10;
mainProgram = if name != "" then name else ccName;
};
}

View file

@ -0,0 +1,10 @@
getTargetRole
getTargetRoleWrapper
export FC${role_post}=@named_fc@
# If unset, assume the default hardening flags.
: ${NIX_HARDENING_ENABLE="@default_hardening_flags_str@"}
export NIX_HARDENING_ENABLE
unset -v role_post

View file

@ -0,0 +1,182 @@
#! @shell@
set -eu -o pipefail +o posix
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
path_backup="$PATH"
# That @-vars are substituted separately from bash evaluation makes
# shellcheck think this, and others like it, are useless conditionals.
# shellcheck disable=SC2157
if [[ -n "@coreutils_bin@" && -n "@gnugrep_bin@" ]]; then
PATH="@coreutils_bin@/bin:@gnugrep_bin@/bin"
fi
cInclude=0
source @out@/nix-support/utils.bash
# Flirting with a layer violation here.
if [ -z "${NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
source @bintools@/nix-support/add-flags.sh
fi
# Put this one second so libc ldflags take priority.
if [ -z "${NIX_CC_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
source @out@/nix-support/add-flags.sh
fi
if [ -z "${NIX_GNAT_WRAPPER_EXTRA_FLAGS_SET_@suffixSalt@:-}" ]; then
source @out@/nix-support/add-gnat-extra-flags.sh
fi
# Parse command line options and set several variables.
# For instance, figure out if linker flags should be passed.
# GCC prints annoying warnings when they are not needed.
dontLink=0
nonFlagArgs=0
# shellcheck disable=SC2193
expandResponseParams "$@"
declare -i n=0
nParams=${#params[@]}
while (( "$n" < "$nParams" )); do
p=${params[n]}
p2=${params[n+1]:-} # handle `p` being last one
if [ "$p" = -c ]; then
dontLink=1
elif [ "$p" = -S ]; then
dontLink=1
elif [ "$p" = -E ]; then
dontLink=1
elif [ "$p" = -E ]; then
dontLink=1
elif [ "$p" = -M ]; then
dontLink=1
elif [ "$p" = -MM ]; then
dontLink=1
elif [[ "$p" = -x && "$p2" = *-header ]]; then
dontLink=1
elif [[ "$p" != -?* ]]; then
# A dash alone signifies standard input; it is not a flag
nonFlagArgs=1
fi
n+=1
done
# If we pass a flag like -Wl, then gcc will call the linker unless it
# can figure out that it has to do something else (e.g., because of a
# "-c" flag). So if no non-flag arguments are given, don't pass any
# linker flags. This catches cases like "gcc" (should just print
# "gcc: no input files") and "gcc -v" (should print the version).
if [ "$nonFlagArgs" = 0 ]; then
dontLink=1
fi
# Optionally filter out paths not referring to the store.
if [[ "${NIX_ENFORCE_PURITY:-}" = 1 && -n "$NIX_STORE" ]]; then
rest=()
nParams=${#params[@]}
declare -i n=0
while (( "$n" < "$nParams" )); do
p=${params[n]}
p2=${params[n+1]:-} # handle `p` being last one
if [ "${p:0:3}" = -L/ ] && badPath "${p:2}"; then
skip "${p:2}"
elif [ "$p" = -L ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "${p:0:3}" = -I/ ] && badPath "${p:2}"; then
skip "${p:2}"
elif [ "$p" = -I ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "${p:0:4}" = -aI/ ] && badPath "${p:3}"; then
skip "${p:3}"
elif [ "$p" = -aI ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "${p:0:4}" = -aO/ ] && badPath "${p:3}"; then
skip "${p:3}"
elif [ "$p" = -aO ] && badPath "$p2"; then
n+=1; skip "$p2"
elif [ "$p" = -isystem ] && badPath "$p2"; then
n+=1; skip "$p2"
else
rest+=("$p")
fi
n+=1
done
# Old bash empty array hack
params=(${rest+"${rest[@]}"})
fi
# Clear march/mtune=native -- they bring impurity.
if [ "$NIX_ENFORCE_NO_NATIVE_@suffixSalt@" = 1 ]; then
rest=()
# Old bash empty array hack
for p in ${params+"${params[@]}"}; do
if [[ "$p" = -m*=native ]]; then
skip "$p"
else
rest+=("$p")
fi
done
# Old bash empty array hack
params=(${rest+"${rest[@]}"})
fi
case "$(basename $0)x" in
"gnatbindx")
extraBefore=()
extraAfter=($NIX_GNATFLAGS_COMPILE_@suffixSalt@)
;;
"gnatchopx")
extraBefore=("--GCC=@out@/bin/gcc")
extraAfter=()
;;
"gnatcleanx")
extraBefore=($NIX_GNATFLAGS_COMPILE_@suffixSalt@)
extraAfter=()
;;
"gnatlinkx")
extraBefore=()
extraAfter=("--GCC=@out@/bin/gcc")
;;
"gnatlsx")
extraBefore=()
extraAfter=($NIX_GNATFLAGS_COMPILE_@suffixSalt@)
;;
"gnatmakex")
extraBefore=("--GNATBIND=@out@/bin/gnatbind" "--GNATLINK=@out@/bin/gnatlink")
extraAfter=($NIX_GNATFLAGS_COMPILE_@suffixSalt@ -cargs $NIX_GNATMAKE_CARGS_@suffixSalt@)
;;
esac
# As a very special hack, if the arguments are just `-v', then don't
# add anything. This is to prevent `gcc -v' (which normally prints
# out the version number and returns exit code 0) from printing out
# `No input files specified' and returning exit code 1.
if [ "$*" = -v ]; then
extraAfter=()
extraBefore=()
fi
# Optionally print debug info.
if (( "${NIX_DEBUG:-0}" >= 1 )); then
# Old bash workaround, see ld-wrapper for explanation.
echo "extra flags before to @prog@:" >&2
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
echo "original flags to @prog@:" >&2
printf " %q\n" ${params+"${params[@]}"} >&2
echo "extra flags after to @prog@:" >&2
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
fi
PATH="$path_backup"
# Old bash workaround, see above.
exec @prog@ \
${extraBefore+"${extraBefore[@]}"} \
${params+"${params[@]}"} \
${extraAfter+"${extraAfter[@]}"}

View file

@ -0,0 +1,11 @@
#! @shell@
set -eu -o pipefail +o posix
shopt -s nullglob
if (( "${NIX_DEBUG:-0}" >= 7 )); then
set -x
fi
export GCCGO="@out@/bin/gccgo"
exec @prog@ "$@"

View file

@ -0,0 +1,118 @@
# CC Wrapper hygiene
#
# For at least cross compilation, we need to depend on multiple cc-wrappers at
# once---specifically up to one per sort of dependency. This follows from having
# different tools targeting different platforms, and different flags for those
# tools. For example:
#
# # Flags for compiling (whether or not linking) C code for the...
# NIX_CFLAGS_COMPILE_FOR_BUILD # ...build platform
# NIX_CFLAGS_COMPILE # ...host platform
# NIX_CFLAGS_COMPILE_FOR_TARGET # ...target platform
#
# Notice that these are the 3 platforms *relative* to the package using
# cc-wrapper, not absolute ones like `x86_64-pc-linux-gnu`.
#
# The simplest solution would be to have separate cc-wrappers per (3 intended
# use-cases * n absolute concrete platforms). For the use-case axis, we would
# @-splice in 'BUILD_' '' 'TARGET_' to use the right environment variables when
# building the cc-wrapper, and likewise prefix the binaries' names so they didn't
# clobber each other on the PATH. But the need for 3x cc-wrappers, along with
# non-standard name prefixes, is annoying and liable to break packages' build
# systems.
#
# Instead, we opt to have just one cc-wrapper per absolute platform. Matching
# convention, the binaries' names can just be prefixed with their target
# platform. On the other hand, that means packages will depend on not just
# multiple cc-wrappers, but the exact same cc-wrapper derivation multiple ways.
# That means the exact same cc-wrapper derivation must be able to avoid
# conflicting with itself, despite the fact that `setup-hook.sh`, the `addCvars`
# function, and `add-flags.sh` are all communicating with each other with
# environment variables. Yuck.
#
# The basic strategy is:
#
# - Everyone exclusively *adds information* to relative-platform-specific
# environment variables, like `NIX_CFLAGS_COMPILE_FOR_TARGET`, to communicate
# with the wrapped binaries.
#
# - The wrapped binaries will exclusively *read* cc-wrapper-derivation-specific
#   environment variables distinguished with `suffixSalt`, like
# `NIX_CFLAGS_COMPILE_@suffixSalt@`.
#
# - `add-flags`, beyond its old task of reading extra flags stuck inside the
# cc-wrapper derivation, will convert the relative-platform-specific
# variables to cc-wrapper-derivation-specific variables. This conversion is
# the only time all but one of the cc-wrapper-derivation-specific variables
# are set.
#
# This ensures the flow of information is exclusive from
# relative-platform-specific variables to cc-wrapper-derivation-specific
# variables. This allows us to support the general case of a many--many relation
# between relative platforms and cc-wrapper derivations.
#
# For more details, read the individual files where the mechanisms used to
# accomplish this will be individually documented.
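#
# As a concrete illustration (hypothetical values, not part of this file):
# a dependency's env hook might append
#   NIX_CFLAGS_COMPILE_FOR_TARGET+=" -isystem /nix/store/...-zlib-dev/include"
# and `add-flags.sh` of the cc-wrapper acting in the target role (say, with
# suffixSalt `aarch64_unknown_linux_gnu`) folds that into
#   NIX_CFLAGS_COMPILE_aarch64_unknown_linux_gnu+=" -isystem /nix/store/...-zlib-dev/include"
# which is the only kind of variable the wrapped binaries themselves read.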
# Skip setup hook if we're neither a build-time dep, nor, temporarily, doing a
# native compile.
#
# TODO(@Ericson2314): No native exception
[[ -z ${strictDeps-} ]] || (( "$hostOffset" < 0 )) || return 0
# It's fine that any other cc-wrapper will redefine this. Bash functions close
# over no state, and there's no @-substitutions within, so any redefined
# function is guaranteed to be exactly the same.
ccWrapper_addCVars () {
# See ../setup-hooks/role.bash
local role_post
getHostRoleEnvHook
if [ -d "$1/include" ]; then
export NIX_CFLAGS_COMPILE${role_post}+=" -isystem $1/include"
fi
if [ -d "$1/Library/Frameworks" ]; then
export NIX_CFLAGS_COMPILE${role_post}+=" -iframework $1/Library/Frameworks"
fi
}
# See ../setup-hooks/role.bash
getTargetRole
getTargetRoleWrapper
# We use the `targetOffset` to choose the right env hook to accumulate the right
# sort of deps (those with that offset).
addEnvHooks "$targetOffset" ccWrapper_addCVars
# Note 1: these come *after* $out in the PATH (see setup.sh).
# Note 2: phase separation makes this look useless to shellcheck.
# shellcheck disable=SC2157
if [ -n "@cc@" ]; then
addToSearchPath _PATH @cc@/bin
fi
# shellcheck disable=SC2157
if [ -n "@libc_bin@" ]; then
addToSearchPath _PATH @libc_bin@/bin
fi
# shellcheck disable=SC2157
if [ -n "@coreutils_bin@" ]; then
addToSearchPath _PATH @coreutils_bin@/bin
fi
# Export tool environment variables so various build systems use the right ones.
export NIX_CC${role_post}=@out@
export CC${role_post}=@named_cc@
export CXX${role_post}=@named_cxx@
# If unset, assume the default hardening flags.
: ${NIX_HARDENING_ENABLE="@default_hardening_flags_str@"}
export NIX_HARDENING_ENABLE
# No local scope in sourced file
unset -v role_post

View file

@ -0,0 +1,95 @@
{ lib
, buildPackages
}:
let
# rudimentary support for cross-compiling
# see: https://github.com/NixOS/nixpkgs/pull/279487#discussion_r1444449726
inherit (buildPackages)
mktemp
rsync
;
in
rec {
/* Prepare a derivation for local builds.
*
* This function prepares checkpoint builds by storing
* the build output and the sources for cross checking.
* The build output can be used later to allow checkpoint builds
* by passing the derivation output to the `mkCheckpointBuild` function.
*
* To build a project with checkpoints, follow these steps:
* - run `prepareCheckpointBuild` on the desired derivation, e.g.
* checkpointArtifacts = prepareCheckpointBuild virtualbox;
* - change something you want in the sources of the package,
* e.g. using source override:
 *   changedVBox = pkgs.virtualbox.overrideAttrs (old: {
 *     src = path/to/vbox/sources;
 *   });
* - use `mkCheckpointBuild changedVBox checkpointArtifacts`
* - enjoy shorter build times
*/
prepareCheckpointBuild = drv: drv.overrideAttrs (old: {
outputs = [ "out" ];
name = drv.name + "-checkpointArtifacts";
# To determine differences between the state of the build directory
# from an earlier build and a later one we store the state of the build
# directory before build, but after patch phases.
# This way, the same derivation can be used multiple times and only changes are detected.
# Additionally, removed files are handled correctly in later builds.
preBuild = (old.preBuild or "") + ''
mkdir -p $out/sources
cp -r ./* $out/sources/
'';
# After the build, the build directory is copied again
# to get the output files.
# We copy the complete build folder, to take care of
# build tools that build in the source directory, instead of
# having a separate build directory such as the Linux kernel.
installPhase = ''
runHook preCheckpointInstall
mkdir -p $out/outputs
cp -r ./* $out/outputs/
runHook postCheckpointInstall
unset postPhases
'';
dontFixup = true;
doInstallCheck = false;
doDist = false;
});
/* Build a derivation based on the checkpoint output generated by
* the `prepareCheckpointBuild` function.
*
* Usage:
* let
* checkpointArtifacts = prepareCheckpointBuild drv;
* in mkCheckpointBuild drv checkpointArtifacts
*/
mkCheckpointBuild = drv: checkpointArtifacts: drv.overrideAttrs (old: {
# The actual checkpoint build phase.
# We compare the changed sources from a previous build with the current and create a patch.
# Afterwards we clean the build directory and copy the previous output files (including the sources).
# The source difference patch is then applied to get the latest changes again to allow short build times.
preBuild = (old.preBuild or "") + ''
set +e
sourceDifferencePatchFile=$(${mktemp}/bin/mktemp)
diff -ur ${checkpointArtifacts}/sources ./ > "$sourceDifferencePatchFile"
set -e
shopt -s dotglob
rm -r *
${rsync}/bin/rsync \
--checksum --times --atimes --chown=$USER:$USER --chmod=+w \
-r ${checkpointArtifacts}/outputs/ .
patch -p 1 -i "$sourceDifferencePatchFile"
rm "$sourceDifferencePatchFile"
'';
});
mkCheckpointedBuild = lib.warn
"`mkCheckpointedBuild` is deprecated, use `mkCheckpointBuild` instead!"
mkCheckpointBuild;
}

View file

@ -0,0 +1,42 @@
# This derivation builds two files containing information about the
# closure of 'rootPaths': $out/store-paths contains the paths in the
# closure, and $out/registration contains a file suitable for use with
# "nix-store --load-db" and "nix-store --register-validity
# --hash-given".
{ stdenv, coreutils, jq }:
{ rootPaths }:
assert builtins.langVersion >= 5;
stdenv.mkDerivation {
name = "closure-info";
__structuredAttrs = true;
exportReferencesGraph.closure = rootPaths;
preferLocalBuild = true;
nativeBuildInputs = [ coreutils jq ];
empty = rootPaths == [];
buildCommand =
''
out=''${outputs[out]}
mkdir $out
if [[ -n "$empty" ]]; then
echo 0 > $out/total-nar-size
touch $out/registration $out/store-paths
else
jq -r ".closure | map(.narSize) | add" < "$NIX_ATTRS_JSON_FILE" > $out/total-nar-size
jq -r '.closure | map([.path, .narHash, .narSize, "", (.references | length)] + .references) | add | map("\(.)\n") | add' < "$NIX_ATTRS_JSON_FILE" | head -n -1 > $out/registration
jq -r '.closure[].path' < "$NIX_ATTRS_JSON_FILE" > $out/store-paths
fi
'';
}
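# A minimal usage sketch (in Nixpkgs this function is exposed as
# `closureInfo`; the call below is illustrative):
#   closureInfo { rootPaths = [ pkgs.hello ]; }
# builds a derivation providing $out/store-paths, $out/registration and
# $out/total-nar-size for the runtime closure of pkgs.hello.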

View file

@ -0,0 +1,155 @@
{ lib, stdenv, coqPackages, coq, which, fetchzip }@args:
let
lib = import ./extra-lib.nix {
inherit (args) lib;
};
inherit (lib)
concatStringsSep
flip
foldl
isFunction
isString
optional
optionalAttrs
optionals
optionalString
pred
remove
switch
versions
;
inherit (lib.attrsets) removeAttrs;
inherit (lib.strings) match;
isGitHubDomain = d: match "^github.*" d != null;
isGitLabDomain = d: match "^gitlab.*" d != null;
in
{ pname,
version ? null,
fetcher ? null,
owner ? "coq-community",
domain ? "github.com",
repo ? pname,
defaultVersion ? null,
releaseRev ? (v: v),
displayVersion ? {},
release ? {},
buildInputs ? [],
nativeBuildInputs ? [],
extraBuildInputs ? [],
extraNativeBuildInputs ? [],
overrideBuildInputs ? [],
overrideNativeBuildInputs ? [],
namePrefix ? [ "coq" ],
enableParallelBuilding ? true,
extraInstallFlags ? [],
setCOQBIN ? true,
mlPlugin ? false,
useMelquiondRemake ? null,
dropAttrs ? [],
keepAttrs ? [],
dropDerivationAttrs ? [],
useDuneifVersion ? (x: false),
useDune ? false,
opam-name ? (concatStringsSep "-" (namePrefix ++ [ pname ])),
...
}@args:
let
args-to-remove = foldl (flip remove) ([
"version" "fetcher" "repo" "owner" "domain" "releaseRev"
"displayVersion" "defaultVersion" "useMelquiondRemake"
"release"
"buildInputs" "nativeBuildInputs"
"extraBuildInputs" "extraNativeBuildInputs"
"overrideBuildInputs" "overrideNativeBuildInputs"
"namePrefix"
"meta" "useDuneifVersion" "useDune" "opam-name"
"extraInstallFlags" "setCOQBIN" "mlPlugin"
"dropAttrs" "dropDerivationAttrs" "keepAttrs" ] ++ dropAttrs) keepAttrs;
fetch = import ../coq/meta-fetch/default.nix
{ inherit lib stdenv fetchzip; } ({
inherit release releaseRev;
location = { inherit domain owner repo; };
} // optionalAttrs (args?fetcher) {inherit fetcher;});
fetched = fetch (if version != null then version else defaultVersion);
display-pkg = n: sep: v:
let d = displayVersion.${n} or (if sep == "" then ".." else true); in
n + optionalString (v != "" && v != null) (switch d [
{ case = true; out = sep + v; }
{ case = "."; out = sep + versions.major v; }
{ case = ".."; out = sep + versions.majorMinor v; }
{ case = "..."; out = sep + versions.majorMinorPatch v; }
{ case = isFunction; out = optionalString (d v != "") (sep + d v); }
{ case = isString; out = optionalString (d != "") (sep + d); }
] "") + optionalString (v == null) "-broken";
append-version = p: n: p + display-pkg n "" coqPackages.${n}.version + "-";
prefix-name = foldl append-version "" namePrefix;
useDune = args.useDune or (useDuneifVersion fetched.version);
coqlib-flags = switch coq.coq-version [
{ case = v: versions.isLe "8.6" v && v != "dev" ;
out = [ "COQLIB=$(out)/lib/coq/${coq.coq-version}/" ]; }
] [ "COQLIBINSTALL=$(out)/lib/coq/${coq.coq-version}/user-contrib"
"COQPLUGININSTALL=$(OCAMLFIND_DESTDIR)" ];
docdir-flags = switch coq.coq-version [
{ case = v: versions.isLe "8.6" v && v != "dev";
out = [ "DOCDIR=$(out)/share/coq/${coq.coq-version}/" ]; }
] [ "COQDOCINSTALL=$(out)/share/coq/${coq.coq-version}/user-contrib" ];
in
stdenv.mkDerivation (removeAttrs ({
name = prefix-name + (display-pkg pname "-" fetched.version);
inherit (fetched) version src;
nativeBuildInputs = args.overrideNativeBuildInputs
or ([ which ]
++ optional useDune coq.ocamlPackages.dune_3
++ optionals (useDune || mlPlugin) [ coq.ocamlPackages.ocaml coq.ocamlPackages.findlib ]
++ (args.nativeBuildInputs or []) ++ extraNativeBuildInputs);
buildInputs = args.overrideBuildInputs
or ([ coq ] ++ (args.buildInputs or []) ++ extraBuildInputs);
inherit enableParallelBuilding;
meta = ({ platforms = coq.meta.platforms; } //
(switch domain [{
case = pred.union isGitHubDomain isGitLabDomain;
out = { homepage = "https://${domain}/${owner}/${repo}"; };
}] {}) //
optionalAttrs (fetched.broken or false) { coqFilter = true; broken = true; }) //
(args.meta or {}) ;
}
// (optionalAttrs setCOQBIN { COQBIN = "${coq}/bin/"; })
// (optionalAttrs (!args?installPhase && !args?useMelquiondRemake) {
installFlags =
coqlib-flags ++ docdir-flags ++
extraInstallFlags;
})
// (optionalAttrs useDune {
buildPhase = ''
runHook preBuild
dune build -p ${opam-name} ''${enableParallelBuilding:+-j $NIX_BUILD_CORES}
runHook postBuild
'';
installPhase = ''
runHook preInstall
dune install ${opam-name} --prefix=$out
mv $out/lib/coq $out/lib/TEMPORARY
mkdir $out/lib/coq/
mv $out/lib/TEMPORARY $out/lib/coq/${coq.coq-version}
runHook postInstall
'';
})
// (optionalAttrs (args?useMelquiondRemake) rec {
COQUSERCONTRIB = "$out/lib/coq/${coq.coq-version}/user-contrib";
preConfigurePhases = "autoconf";
configureFlags = [ "--libdir=${COQUSERCONTRIB}/${useMelquiondRemake.logpath or ""}" ];
buildPhase = "./remake -j$NIX_BUILD_CORES";
installPhase = "./remake install";
})
// (removeAttrs args args-to-remove)) dropDerivationAttrs)

View file

@ -0,0 +1,213 @@
{ lib }:
let
inherit (lib)
all
concatStringsSep
findFirst
flip
getAttr
head
isFunction
length
recursiveUpdate
splitVersion
tail
take
versionAtLeast
versionOlder
zipListsWith
;
in
recursiveUpdate lib (rec {
versions =
let
truncate = n: v: concatStringsSep "." (take n (splitVersion v));
opTruncate = op: v0: v: let n = length (splitVersion v0); in
op (truncate n v) (truncate n v0);
in rec {
/* Get string of the first n parts of a version string.
Example:
- truncate 2 "1.2.3-stuff"
=> "1.2"
- truncate 4 "1.2.3-stuff"
=> "1.2.3.stuff"
*/
inherit truncate;
/* Get string of the first three parts (major, minor and patch)
of a version string.
Example:
majorMinorPatch "1.2.3-stuff"
=> "1.2.3"
*/
majorMinorPatch = truncate 3;
  /* Version comparison predicates,
    - isGe v0 v <-> v is greater than or equal to v0 [*]
    - isLe v0 v <-> v is less than or equal to v0 [*]
    - isGt v0 v <-> v is strictly greater than v0 [*]
    - isLt v0 v <-> v is strictly less than v0 [*]
    - isEq v0 v <-> v is equal to v0 [*]
- range low high v <-> v is between low and high [**]
[*] truncating v to the same number of digits as v0
[**] truncating v to low for the lower bound and high for the upper bound
Examples:
- isGe "8.10" "8.10.1"
=> true
- isLe "8.10" "8.10.1"
=> true
- isGt "8.10" "8.10.1"
=> false
- isGt "8.10.0" "8.10.1"
=> true
- isEq "8.10" "8.10.1"
=> true
- range "8.10" "8.11" "8.11.1"
=> true
- range "8.10" "8.11+" "8.11.0"
=> false
- range "8.10" "8.11+" "8.11+beta1"
=> false
*/
isGe = opTruncate versionAtLeast;
isGt = opTruncate (flip versionOlder);
isLe = opTruncate (flip versionAtLeast);
isLt = opTruncate versionOlder;
isEq = opTruncate pred.equal;
range = low: high: pred.inter (versions.isGe low) (versions.isLe high);
};
  /* Returns a list of lists, splitting a list using a predicate.
    This is analogous to `builtins.split sep string`,
    with a predicate as the separator and a list instead of a string.
Type: splitList :: (a -> bool) -> [a] -> [[a]]
Example:
splitList (x: x == "x") [ "y" "x" "z" "t" ]
=> [ [ "y" ] "x" [ "z" "t" ] ]
*/
splitList = pred: l: # put in file lists
let loop = (vv: v: l: if l == [] then vv ++ [v]
else let hd = head l; tl = tail l; in
if pred hd then loop (vv ++ [ v hd ]) [] tl else loop vv (v ++ [hd]) tl);
in loop [] [] l;
pred = {
/* Predicate intersection, union, and complement */
inter = p: q: x: p x && q x;
union = p: q: x: p x || q x;
compl = p: x: ! p x;
true = p: true;
false = p: false;
/* predicate "being equal to y" */
equal = y: x: x == y;
};
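  /* Example (illustrative):
     pred.inter (versions.isGe "8.10") (versions.isLt "8.12") "8.11.0"
     => true
  */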
/* Emulate a "switch - case" construct,
instead of relying on `if then else if ...` */
/* Usage:
```nix
switch-if [
if-clause-1
..
if-clause-k
] default-out
```
    where an if-clause has the form `{ cond = b; out = r; }`;
    the result is the `out` of the first clause whose `cond` is true */
switch-if = c: d: (findFirst (getAttr "cond") {} c).out or d;
/* Usage:
```nix
switch x [
simple-clause-1
..
simple-clause-k
] default-out
```
    where a simple-clause has the form `{ case = p; out = r; }`;
    the result is the `out` of the first clause whose predicate `p x` is true,
or
```nix
switch [ x1 .. xn ] [
complex-clause-1
..
complex-clause-k
] default-out
```
    where a complex-clause is either a simple-clause
    or has the form { cases = [ p1 .. pn ]; out = r; },
    in which case the result is the `out` of the first clause
    for which all the predicates `pi` hold on the corresponding `xi`.
    If a case `p` is not a function, it is converted to `equal p`.
    If `out` is missing, the default-out is taken. */
switch = var: clauses: default: with pred; let
compare = f: if isFunction f then f else equal f;
combine = cl: var:
if cl?case then compare cl.case var
else all (equal true) (zipListsWith compare cl.cases var); in
switch-if (map (cl: { cond = combine cl var; inherit (cl) out; }) clauses) default;
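  /* Example (illustrative):
     switch "8.11" [
       { case = versions.isLt "8.10"; out = "old"; }
       { case = "8.11"; out = "current"; }
     ] "unknown"
     => "current"
  */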
/* Override arguments to mkCoqDerivation for a Coq library.
This function allows you to easily override arguments to mkCoqDerivation,
even when they are not exposed by the Coq library directly.
Type: overrideCoqDerivation :: AttrSet -> CoqLibraryDerivation -> CoqLibraryDerivation
Example:
```nix
coqPackages.lib.overrideCoqDerivation
{
defaultVersion = "9999";
release."9999".sha256 = "1lq8x86vd3vqqh2yq6hvyagpnhfq5wmk5pg2z0xq7b7dbbbhyfkw";
}
coqPackages.QuickChick;
```
This example overrides the `defaultVersion` and `release` arguments that
are passed to `mkCoqDerivation` in the QuickChick derivation.
Note that there is a difference between using `.override` on a Coq
library vs this `overrideCoqDerivation` function. `.override` allows you
to modify arguments to the derivation itself, for instance by passing
different versions of dependencies:
```nix
coqPackages.QuickChick.override { ssreflect = my-cool-ssreflect; }
```
whereas `overrideCoqDerivation` allows you to override arguments to the
call to `mkCoqDerivation` in the Coq library.
Note that all Coq libraries in Nixpkgs have a `version` argument for
easily using a different version. So if all you want to do is use a
different version, and the derivation for the Coq library already has
support for the version you want, you likely only need to update the
`version` argument on the library derivation. This is done with
`.override`:
```nix
coqPackages.QuickChick.override { version = "1.4.0"; }
```
*/
overrideCoqDerivation = f: drv: (drv.override (args: {
mkCoqDerivation = drv_: (args.mkCoqDerivation drv_).override f;
}));
})

View file

@ -0,0 +1,95 @@
{ lib, stdenv, fetchzip }@args:
let
lib = import ../extra-lib.nix {
inherit (args) lib;
};
inherit (lib)
attrNames
fakeSha256
filter
findFirst
head
isAttrs
isPath
isString
last
length
optionalAttrs
pathExists
pred
sort
switch
switch-if
versionAtLeast
versions
;
inherit (lib.strings) match split;
default-fetcher = {domain ? "github.com", owner ? "", repo, rev, name ? "source", sha256 ? null, ...}@args:
let ext = if args?sha256 then "zip" else "tar.gz";
fmt = if args?sha256 then "zip" else "tarball";
pr = match "^#(.*)$" rev;
url = switch-if [
{ cond = pr == null && (match "^github.*" domain) != null;
out = "https://${domain}/${owner}/${repo}/archive/${rev}.${ext}"; }
{ cond = pr != null && (match "^github.*" domain) != null;
out = "https://api.${domain}/repos/${owner}/${repo}/${fmt}/pull/${head pr}/head"; }
{ cond = pr == null && (match "^gitlab.*" domain) != null;
out = "https://${domain}/${owner}/${repo}/-/archive/${rev}/${repo}-${rev}.${ext}"; }
{ cond = (match "(www.)?mpi-sws.org" domain) != null;
out = "https://www.mpi-sws.org/~${owner}/${repo}/download/${repo}-${rev}.${ext}";}
] (throw "meta-fetch: no fetcher found for domain ${domain} on ${rev}");
fetch = x: if args?sha256 then fetchzip (x // { inherit sha256; }) else builtins.fetchTarball x;
in fetch { inherit url ; };
in
{
fetcher ? default-fetcher,
location,
release ? {},
releaseRev ? (v: v),
}:
let isVersion = x: isString x && match "^/.*" x == null && release?${x};
shortVersion = x: if (isString x && match "^/.*" x == null)
then findFirst (v: versions.majorMinor v == x) null
(sort versionAtLeast (attrNames release))
else null;
isShortVersion = x: shortVersion x != null;
isPathString = x: isString x && match "^/.*" x != null && pathExists x; in
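# A summary of the shapes `arg` may take (matching the cases below; the
# example values are illustrative):
#   null                 -> marked broken, version "broken"
#   "/path/to/src"       -> version "dev", the local path as src
#   "1.2.0" or "1.2"     -> a released version looked up in `release`
#   "owner:rev" or "rev" -> that revision fetched from `location`
#   an attrset or a path -> an explicit version/src description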
arg:
switch arg [
{ case = isNull; out = { version = "broken"; src = ""; broken = true; }; }
{ case = isPathString; out = { version = "dev"; src = arg; }; }
{ case = pred.union isVersion isShortVersion;
out = let
v = if isVersion arg then arg else shortVersion arg;
given-sha256 = release.${v}.sha256 or "";
sha256 = if given-sha256 == "" then fakeSha256 else given-sha256;
rv = release.${v} // { inherit sha256; };
in
{
version = rv.version or v;
src = rv.src or fetcher (location // { rev = releaseRev v; } // rv);
};
}
{ case = isString;
out = let
splitted = filter isString (split ":" arg);
rev = last splitted;
has-owner = length splitted > 1;
version = "dev"; in {
inherit version;
src = fetcher (location // { inherit rev; } //
(optionalAttrs has-owner { owner = head splitted; }));
}; }
{ case = isAttrs;
out = {
version = arg.version or "dev";
src = (arg.fetcher or fetcher) (location // (arg.location or {})); }; }
{ case = isPath;
out = {
version = "dev" ;
src = builtins.path {path = arg; name = location.name or "source";}; }; }
] (throw "not a valid source description")

View file

@ -0,0 +1,132 @@
{ lib
, stdenv
, callPackage
, runCommand
, writeText
, pub2nix
, dartHooks
, makeWrapper
, dart
, nodejs
, darwin
, jq
, yq
}:
{ src
, sourceRoot ? "source"
, packageRoot ? (lib.removePrefix "/" (lib.removePrefix "source" sourceRoot))
, gitHashes ? { }
, sdkSourceBuilders ? { }
, customSourceBuilders ? { }
, sdkSetupScript ? ""
, extraPackageConfigSetup ? ""
# Output type to produce. Can be any kind supported by dart
# https://dart.dev/tools/dart-compile#types-of-output
# If using jit, you might want to pass some arguments to `dartJitFlags`
, dartOutputType ? "exe"
, dartCompileCommand ? "dart compile"
, dartCompileFlags ? [ ]
# These come at the end of the command, useful to pass flags to the jit run
, dartJitFlags ? [ ]
# Attrset of entry point files to build and install.
# Where key is the final binary path and value is the source file path
# e.g. { "bin/foo" = "bin/main.dart"; }
# Set to null to read executables from pubspec.yaml
, dartEntryPoints ? null
# Used when wrapping aot, jit, kernel, and js builds.
# Set to null to disable wrapping.
, dartRuntimeCommand ? if dartOutputType == "aot-snapshot" then "${dart}/bin/dartaotruntime"
else if (dartOutputType == "jit-snapshot" || dartOutputType == "kernel") then "${dart}/bin/dart"
else if dartOutputType == "js" then "${nodejs}/bin/node"
else null
, runtimeDependencies ? [ ]
, extraWrapProgramArgs ? ""
, autoPubspecLock ? null
, pubspecLock ? if autoPubspecLock == null then
throw "The pubspecLock argument is required. If import-from-derivation is allowed (it isn't in Nixpkgs), you can set autoPubspecLock to the path to a pubspec.lock instead."
else
assert lib.assertMsg (builtins.pathExists autoPubspecLock) "The pubspec.lock file could not be found!";
lib.importJSON (runCommand "${lib.getName args}-pubspec-lock-json" { nativeBuildInputs = [ yq ]; } ''yq . '${autoPubspecLock}' > "$out"'')
, ...
}@args:
let
generators = callPackage ./generators.nix { inherit dart; } { buildDrvArgs = args; };
pubspecLockFile = builtins.toJSON pubspecLock;
pubspecLockData = pub2nix.readPubspecLock { inherit src packageRoot pubspecLock gitHashes sdkSourceBuilders customSourceBuilders; };
packageConfig = generators.linkPackageConfig {
packageConfig = pub2nix.generatePackageConfig {
pname = if args.pname != null then "${args.pname}-${args.version}" else null;
dependencies =
# Ideally, we'd only include the main dependencies and their transitive
# dependencies.
#
# The pubspec.lock file does not contain information about where
# transitive dependencies come from, though, and it would be weird to
# include the transitive dependencies of dev and override dependencies
# without including the dev and override dependencies themselves.
builtins.concatLists (builtins.attrValues pubspecLockData.dependencies);
inherit (pubspecLockData) dependencySources;
};
extraSetupCommands = extraPackageConfigSetup;
};
inherit (dartHooks.override { inherit dart; }) dartConfigHook dartBuildHook dartInstallHook dartFixupHook;
baseDerivation = stdenv.mkDerivation (finalAttrs: (builtins.removeAttrs args [ "gitHashes" "sdkSourceBuilders" "pubspecLock" "customSourceBuilders" ]) // {
inherit pubspecLockFile packageConfig sdkSetupScript
dartCompileCommand dartOutputType dartRuntimeCommand dartCompileFlags
dartJitFlags;
outputs = [ "out" "pubcache" ] ++ args.outputs or [ ];
dartEntryPoints =
if (dartEntryPoints != null)
then writeText "entrypoints.json" (builtins.toJSON dartEntryPoints)
else null;
runtimeDependencies = map lib.getLib runtimeDependencies;
nativeBuildInputs = (args.nativeBuildInputs or [ ]) ++ [
dart
dartConfigHook
dartBuildHook
dartInstallHook
dartFixupHook
makeWrapper
jq
] ++ lib.optionals stdenv.isDarwin [
darwin.sigtool
] ++
# Ensure that we inherit the propagated build inputs from the dependencies.
builtins.attrValues pubspecLockData.dependencySources;
preConfigure = args.preConfigure or "" + ''
ln -sf "$pubspecLockFilePath" pubspec.lock
'';
# When stripping, it seems some ELF information is lost and the dart VM cli
# runs instead of the expected program. Don't strip if it's an exe output.
dontStrip = args.dontStrip or (dartOutputType == "exe");
passAsFile = [ "pubspecLockFile" ];
passthru = {
pubspecLock = pubspecLockData;
} // (args.passthru or { });
meta = (args.meta or { }) // { platforms = args.meta.platforms or dart.meta.platforms; };
});
in
assert !(builtins.isString dartOutputType && dartOutputType != "") ->
throw "dartOutputType must be a non-empty string";
baseDerivation
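# A minimal call sketch (names and values hypothetical):
#   buildDartApplication {
#     pname = "hello_dart";
#     version = "1.0.0";
#     src = ./.;
#     pubspecLock = lib.importJSON ./pubspec.lock.json;
#   }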

View file

@ -0,0 +1,74 @@
{ lib
, stdenvNoCC
, dart
, dartHooks
, jq
, yq
, cacert
}:
{
# Arguments used in the derivation that builds the Dart package.
# Passing these is recommended to ensure that the same steps are made to
# prepare the sources in both this derivation and the one that builds the Dart
# package.
buildDrvArgs ? { }
, ...
}@args:
# This is a derivation and setup hook that can be used to fetch dependencies for Dart projects.
# It is designed to be placed in the nativeBuildInputs of a derivation that builds a Dart package.
# Providing the buildDrvArgs argument is highly recommended.
let
buildDrvInheritArgNames = [
"name"
"pname"
"version"
"src"
"sourceRoot"
"setSourceRoot"
"preUnpack"
"unpackPhase"
"unpackCmd"
"postUnpack"
"prePatch"
"patchPhase"
"patches"
"patchFlags"
"postPatch"
];
buildDrvInheritArgs = builtins.foldl'
(attrs: arg:
if buildDrvArgs ? ${arg}
then attrs // { ${arg} = buildDrvArgs.${arg}; }
else attrs)
{ }
buildDrvInheritArgNames;
drvArgs = buildDrvInheritArgs // (removeAttrs args [ "buildDrvArgs" ]);
name = (if drvArgs ? name then drvArgs.name else "${drvArgs.pname}-${drvArgs.version}");
# Adds the root package to a dependency package_config.json file from pub2nix.
linkPackageConfig = { packageConfig, extraSetupCommands ? "" }: stdenvNoCC.mkDerivation (drvArgs // {
name = "${name}-package-config-with-root.json";
nativeBuildInputs = drvArgs.nativeBuildInputs or [ ] ++ args.nativeBuildInputs or [ ] ++ [ jq yq ];
dontBuild = true;
installPhase = ''
runHook preInstall
packageName="$(yq --raw-output .name pubspec.yaml)"
jq --arg name "$packageName" '.packages |= . + [{ name: $name, rootUri: "../", packageUri: "lib/" }]' '${packageConfig}' > "$out"
${extraSetupCommands}
runHook postInstall
'';
});
in
{
inherit
linkPackageConfig;
}

View file

@ -0,0 +1,34 @@
# shellcheck shell=bash
# Outputs line-separated "${dest}\t${source}"
_getDartEntryPoints() {
if [ -n "$dartEntryPoints" ]; then
@jq@ -r '(to_entries | map(.key + "\t" + .value) | join("\n"))' "$dartEntryPoints"
else
# The pubspec executables section follows the pattern:
# <output-bin-name>: [source-file-name]
# Where source-file-name defaults to output-bin-name if omitted
@yq@ -r '(.executables | to_entries | map("bin/" + .key + "\t" + "bin/" + (.value // .key) + ".dart") | join("\n"))' pubspec.yaml
fi
}
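# e.g. (illustrative) for `executables: { foo: main }` in pubspec.yaml the
# hook prints the single line `bin/foo<TAB>bin/main.dart`.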
dartBuildHook() {
echo "Executing dartBuildHook"
runHook preBuild
while IFS=$'\t' read -ra target; do
dest="${target[0]}"
src="${target[1]}"
eval "$dartCompileCommand" "$dartOutputType" \
-o "$dest" "${dartCompileFlags[@]}" "$src" "${dartJitFlags[@]}"
done < <(_getDartEntryPoints)
runHook postBuild
echo "Finished dartBuildHook"
}
if [ -z "${dontDartBuild-}" ] && [ -z "${buildPhase-}" ]; then
buildPhase=dartBuildHook
fi

View file

@ -0,0 +1,70 @@
# shellcheck shell=bash
dartConfigHook() {
echo "Executing dartConfigHook"
echo "Setting up SDK"
eval "$sdkSetupScript"
echo "Installing dependencies"
mkdir -p .dart_tool
cp "$packageConfig" .dart_tool/package_config.json
packagePath() {
jq --raw-output --arg name "$1" '.packages.[] | select(.name == $name) .rootUri | sub("file://"; "")' .dart_tool/package_config.json
}
# Runs a Dart executable from a package with a custom path.
#
# Usage:
# packageRunCustom <package> [executable] [bin_dir]
#
# By default, [bin_dir] is "bin", and [executable] is <package>.
# i.e. `packageRunCustom build_runner` is equivalent to `packageRunCustom build_runner build_runner bin`, which runs `bin/build_runner.dart` from the build_runner package.
packageRunCustom() {
local args=()
local passthrough=()
while [ $# -gt 0 ]; do
if [ "$1" != "--" ]; then
args+=("$1")
shift
else
shift
passthrough=("$@")
break
fi
done
local name="${args[0]}"
local path="${args[1]:-$name}"
local prefix="${args[2]:-bin}"
dart --packages=.dart_tool/package_config.json "$(packagePath "$name")/$prefix/$path.dart" "${passthrough[@]}"
}
# Runs a Dart executable from a package.
#
# Usage:
# packageRun <package> [-e executable] [...]
#
# To run an executable from an unconventional location, use packageRunCustom.
packageRun() {
local name="$1"
shift
local executableName="$name"
if [ "$1" = "-e" ]; then
shift
executableName="$1"
shift
fi
fileName="$(@yq@ --raw-output --arg name "$executableName" '.executables.[$name] // $name' "$(packagePath "$name")/pubspec.yaml")"
packageRunCustom "$name" "$fileName" -- "$@"
}
echo "Finished dartConfigHook"
}
postConfigureHooks+=(dartConfigHook)

View file

@ -0,0 +1,35 @@
# shellcheck shell=bash
dartFixupHook() {
echo "Executing dartFixupHook"
declare -a wrapProgramArgs
# Add runtime library dependencies to the LD_LIBRARY_PATH.
# For some reason, the RUNPATH of the executable is not used to load dynamic libraries in dart:ffi with DynamicLibrary.open().
#
# This could alternatively be fixed with patchelf --add-needed, but this would cause all the libraries to be opened immediately,
# which is not what application authors expect.
APPLICATION_LD_LIBRARY_PATH=""
for runtimeDependency in "${runtimeDependencies[@]}"; do
addToSearchPath APPLICATION_LD_LIBRARY_PATH "${runtimeDependency}/lib"
done
if [[ ! -z "$APPLICATION_LD_LIBRARY_PATH" ]]; then
wrapProgramArgs+=(--suffix LD_LIBRARY_PATH : \"$APPLICATION_LD_LIBRARY_PATH\")
fi
if [[ ! -z "$extraWrapProgramArgs" ]]; then
wrapProgramArgs+=("$extraWrapProgramArgs")
fi
if [ ${#wrapProgramArgs[@]} -ne 0 ]; then
for f in "$out"/bin/*; do
echo "Wrapping $f..."
eval "wrapProgram \"$f\" ${wrapProgramArgs[@]}"
done
fi
echo "Finished dartFixupHook"
}
postFixupHooks+=(dartFixupHook)

View file

@ -0,0 +1,43 @@
# shellcheck shell=bash
dartInstallHook() {
echo "Executing dartInstallHook"
runHook preInstall
# Install snapshots and executables.
mkdir -p "$out"
while IFS=$'\t' read -ra target; do
dest="${target[0]}"
# Wrap with runtime command, if it's defined
if [ -n "$dartRuntimeCommand" ]; then
install -D "$dest" "$out/share/$dest"
makeWrapper "$dartRuntimeCommand" "$out/$dest" \
--add-flags "$out/share/$dest"
else
install -Dm755 "$dest" "$out/$dest"
fi
done < <(_getDartEntryPoints)
runHook postInstall
echo "Finished dartInstallHook"
}
dartInstallCacheHook() {
echo "Executing dartInstallCacheHook"
# Install the package_config.json file.
mkdir -p "$pubcache"
cp .dart_tool/package_config.json "$pubcache/package_config.json"
echo "Finished dartInstallCacheHook"
}
if [ -z "${dontDartInstall-}" ] && [ -z "${installPhase-}" ]; then
installPhase=dartInstallHook
fi
if [ -z "${dontDartInstallCache-}" ]; then
postInstallHooks+=(dartInstallCacheHook)
fi

View file

@ -0,0 +1,20 @@
{ lib, makeSetupHook, dart, yq, jq }:
{
dartConfigHook = makeSetupHook {
name = "dart-config-hook";
substitutions.yq = "${yq}/bin/yq";
substitutions.jq = "${jq}/bin/jq";
} ./dart-config-hook.sh;
dartBuildHook = makeSetupHook {
name = "dart-build-hook";
substitutions.yq = "${yq}/bin/yq";
substitutions.jq = "${jq}/bin/jq";
} ./dart-build-hook.sh;
dartInstallHook = makeSetupHook {
name = "dart-install-hook";
} ./dart-install-hook.sh;
dartFixupHook = makeSetupHook {
name = "dart-fixup-hook";
} ./dart-fixup-hook.sh;
}

View file

@ -0,0 +1,6 @@
{ callPackage }:
{
readPubspecLock = callPackage ./pubspec-lock.nix { };
generatePackageConfig = callPackage ./package-config.nix { };
}

View file

@ -0,0 +1,68 @@
{ lib
, runCommand
, jq
, yq
}:
{ pname ? null
# A list of dependency package names.
, dependencies
# An attribute set of package names to sources.
, dependencySources
}:
let
packages = lib.genAttrs dependencies (dependency: rec {
src = dependencySources.${dependency};
inherit (src) packageRoot;
});
in
(runCommand "${lib.optionalString (pname != null) "${pname}-"}package-config.json" {
inherit packages;
nativeBuildInputs = [ jq yq ];
__structuredAttrs = true;
}) ''
declare -A packageSources
declare -A packageRoots
while IFS=',' read -r name src packageRoot; do
packageSources["$name"]="$src"
packageRoots["$name"]="$packageRoot"
done < <(jq -r '.packages | to_entries | map("\(.key),\(.value.src),\(.value.packageRoot)") | .[]' "$NIX_ATTRS_JSON_FILE")
for package in "''${!packageSources[@]}"; do
if [ ! -e "''${packageSources["$package"]}/''${packageRoots["$package"]}/pubspec.yaml" ]; then
echo >&2 "The package sources for $package are missing. Is the following path inside the source derivation?"
echo >&2 "Source path: ''${packageSources["$package"]}/''${packageRoots["$package"]}/pubspec.yaml"
exit 1
fi
languageConstraint="$(yq -r .environment.sdk "''${packageSources["$package"]}/''${packageRoots["$package"]}/pubspec.yaml")"
if [[ "$languageConstraint" =~ ^[[:space:]]*(\^|>=|>)?[[:space:]]*([[:digit:]]+\.[[:digit:]]+)\.[[:digit:]]+.*$ ]]; then
languageVersionJson="\"''${BASH_REMATCH[2]}\""
elif [ "$languageConstraint" = 'any' ]; then
languageVersionJson='null'
else
# https://github.com/dart-lang/pub/blob/68dc2f547d0a264955c1fa551fa0a0e158046494/lib/src/language_version.dart#L106C35-L106C35
languageVersionJson='"2.7"'
fi
jq --null-input \
--arg name "$package" \
--arg path "''${packageSources["$package"]}/''${packageRoots["$package"]}" \
--argjson languageVersion "$languageVersionJson" \
'{
name: $name,
rootUri: "file://\($path)",
packageUri: "lib/",
languageVersion: $languageVersion,
}'
done | jq > "$out" --slurp '{
configVersion: 2,
generator: "nixpkgs",
packages: .,
}'
''

View file

@ -0,0 +1,119 @@
{ lib
, callPackage
, fetchurl
, fetchgit
, runCommand
}:
{
# The source directory of the package.
src
# The package subdirectory within src.
# Useful if the package references sibling packages with relative paths.
, packageRoot ? "."
# The pubspec.lock file, in attribute set form.
, pubspecLock
# Hashes for Git dependencies.
# Pub does not record these itself, so they must be manually provided.
, gitHashes ? { }
# Functions to generate SDK package sources.
# The function names should match the SDK names, and the package name is given as an argument.
, sdkSourceBuilders ? { }
# Functions that create custom package source derivations.
#
# The function names should match the package names, and the package version,
# source, and source files are given in an attribute set argument.
#
# The passthru of the source derivation should be propagated.
, customSourceBuilders ? { }
}:
let
dependencyVersions = builtins.mapAttrs (name: details: details.version) pubspecLock.packages;
dependencyTypes = {
"direct main" = "main";
"direct dev" = "dev";
"direct overridden" = "overridden";
"transitive" = "transitive";
};
dependencies = lib.foldlAttrs
(dependencies: name: details: dependencies // { ${dependencyTypes.${details.dependency}} = dependencies.${dependencyTypes.${details.dependency}} ++ [ name ]; })
(lib.genAttrs (builtins.attrValues dependencyTypes) (dependencyType: [ ]))
pubspecLock.packages;
# fetchTarball fails with "tarball contains an unexpected number of top-level files". This is a workaround.
# https://discourse.nixos.org/t/fetchtarball-with-multiple-top-level-directories-fails/20556
mkHostedDependencySource = name: details:
let
archive = fetchurl {
name = "pub-${name}-${details.version}.tar.gz";
url = "${details.description.url}/packages/${details.description.name}/versions/${details.version}.tar.gz";
sha256 = details.description.sha256;
};
in
runCommand "pub-${name}-${details.version}" { passthru.packageRoot = "."; } ''
mkdir -p "$out"
tar xf '${archive}' -C "$out"
'';
mkGitDependencySource = name: details: (fetchgit {
name = "pub-${name}-${details.version}";
url = details.description.url;
rev = details.description.resolved-ref;
hash = gitHashes.${name} or (throw "A Git hash is required for ${name}! Set to an empty string to obtain it.");
}).overrideAttrs ({ passthru ? { }, ... }: {
passthru = passthru // {
packageRoot = details.description.path;
};
});
mkPathDependencySource = name: details:
    assert lib.assertMsg details.description.relative "Only relative paths are supported - ${name} has an absolute path!";
(if lib.isDerivation src then src else (runCommand "pub-${name}-${details.version}" { } ''cp -r '${src}' "$out"'')).overrideAttrs ({ passthru ? { }, ... }: {
passthru = passthru // {
packageRoot = "${packageRoot}/${details.description.path}";
};
});
mkSdkDependencySource = name: details:
(sdkSourceBuilders.${details.description} or (throw "No SDK source builder has been given for ${details.description}!")) name;
addDependencySourceUtils = dependencySource: details: dependencySource.overrideAttrs ({ passthru, ... }: {
passthru = passthru // {
inherit (details) version;
};
});
sourceBuilders = callPackage ../../../development/compilers/dart/package-source-builders { } // customSourceBuilders;
dependencySources = lib.filterAttrs (name: src: src != null) (builtins.mapAttrs
(name: details:
(sourceBuilders.${name} or ({ src, ... }: src)) {
inherit (details) version source;
src = ((addDependencySourceUtils (({
"hosted" = mkHostedDependencySource;
"git" = mkGitDependencySource;
"path" = mkPathDependencySource;
"sdk" = mkSdkDependencySource;
}.${details.source} name) details)) details);
})
pubspecLock.packages);
in
{
inherit
# An attribute set of dependency categories to package name lists.
dependencies
# An attribute set of package names to their versions.
dependencyVersions
# An attribute set of package names to their sources.
dependencySources;
}

View file

@ -0,0 +1,55 @@
# expr and script based on our lsb_release
{ stdenv
, lib
, substituteAll
, coreutils
, getopt
, modDirVersion ? ""
}:
substituteAll {
name = "uname";
src = ./deterministic-uname.sh;
dir = "bin";
isExecutable = true;
inherit coreutils getopt;
uSystem = if stdenv.buildPlatform.uname.system != null then stdenv.buildPlatform.uname.system else "unknown";
inherit (stdenv.buildPlatform.uname) processor;
# uname -o
# maybe add to lib/systems/default.nix uname attrset
# https://github.com/coreutils/coreutils/blob/7fc84d1c0f6b35231b0b4577b70aaa26bf548a7c/src/uname.c#L373-L374
# https://stackoverflow.com/questions/61711186/where-does-host-operating-system-in-uname-c-comes-from
# https://github.com/coreutils/gnulib/blob/master/m4/host-os.m4
operatingSystem =
if stdenv.buildPlatform.isLinux
then "GNU/Linux"
else if stdenv.buildPlatform.isDarwin
then "Darwin" # darwin isn't in host-os.m4 so where does this come from?
else "unknown";
# in os-specific/linux module packages
# --replace '$(shell uname -r)' "${kernel.modDirVersion}" \
# is a common thing to do.
modDirVersion = if modDirVersion != "" then modDirVersion else "unknown";
meta = with lib; {
description = "Print certain system information (hardcoded with lib/system values)";
mainProgram = "uname";
longDescription = ''
This package provides a replacement for `uname` whose output depends only
on `stdenv.buildPlatform`. It is meant to be used from within derivations.
Many packages' build processes run `uname` at compile time and embed its
output into the result of the build. Since `uname` calls into the kernel,
and the Nix sandbox currently does not intercept these calls, builds made
on different kernels will produce different results.
'';
license = [ licenses.mit ];
maintainers = with maintainers; [ artturin ];
platforms = platforms.all;
};
}

View file

@ -0,0 +1,174 @@
#! @shell@
set -o errexit
set -o nounset
show_help() {
@coreutils@/bin/cat << EOF
Usage: uname [OPTION]...
Print certain system information. With no OPTION, same as -s.
-a, --all print all information, in the following order,
except omit -p and -i if unknown:
-s, --kernel-name print the kernel name
-n, --nodename print the network node hostname
-r, --kernel-release print the kernel release
-v, --kernel-version print the kernel version
-m, --machine print the machine hardware name
-p, --processor print the processor type (non-portable)
-i, --hardware-platform print the hardware platform (non-portable)
-o, --operating-system print the operating system
--help display this help and exit
--version output version information and exit
EOF
exit 0
}
# Potential command-line options.
version=0
all=0
kernel_name=0
nodename=0
kernel_release=0
kernel_version=0
machine=0
processor=0
hardware_platform=0
operating_system=0
# With no OPTION, same as -s.
if [[ $# -eq 0 ]]; then
kernel_name=1
fi
@getopt@/bin/getopt --test > /dev/null && rc=$? || rc=$?
if [[ $rc -ne 4 ]]; then
# This shouldn't happen.
echo "Warning: Enhanced getopt not supported, please open an issue in nixpkgs." >&2
else
# Define all short and long options.
SHORT=hvsnrvmpioa
LONG=help,version,kernel-name,nodename,kernel-release,kernel-version,machine,processor,hardware-platform,operating-system,all
# Parse all options.
PARSED=$(@getopt@/bin/getopt --options $SHORT --longoptions $LONG --name "$0" -- "$@")
eval set -- "$PARSED"
fi
# Process each argument, and set the appropriate flag if we recognize it.
while [[ $# -ge 1 ]]; do
case "$1" in
--version)
version=1
;;
-s|--kernel-name)
kernel_name=1
;;
-n|--nodename)
nodename=1
;;
-r|--kernel-release)
kernel_release=1
;;
-v|--kernel-version)
kernel_version=1
;;
-m|--machine)
machine=1
;;
-p|--processor)
processor=1
;;
-i|--hardware-platform)
hardware_platform=1
;;
-o|--operating-system)
operating_system=1
;;
-a|--all)
all=1
;;
--help)
show_help
;;
--)
shift
break
;;
*)
echo "uname: unrecognized option '$1'"
echo "Type 'uname --help' for a list of available options."
exit 1
;;
esac
shift
done
KERNEL_NAME_VAL=@uSystem@
NODENAME_VAL=nixpkgs
KERNEL_RELEASE_VAL=@modDirVersion@
# #1-NixOS SMP PREEMPT_DYNAMIC Wed Dec 14 10:41:06 UTC 2022
KERNEL_VERSION_VAL="#1-NixOS Tue Jan 1 00:00:00 UTC 1980"
MACHINE_VAL=@processor@
PROCESSOR_VAL=unknown
HARDWARE_PLATFORM_VAL=unknown
OPERATING_SYSTEM_VAL=@operatingSystem@
if [[ "$version" = "1" ]]; then
# in case some script greps for GNU coreutils.
echo "uname (GNU coreutils) 9.1"
echo "Nixpkgs deterministic-uname"
exit
fi
# output of the real uname from GNU coreutils
# Darwin:
# Darwin *nodename* 22.1.0 Darwin Kernel Version 22.1.0: Sun Oct 9 20:14:30 PDT 2022; root:xnu-8792.41.9~2/RELEASE_ARM64_T8103 arm64 arm Darwin
# NixOS:
# Linux *nodename* 6.0.13 #1-NixOS SMP PREEMPT_DYNAMIC Wed Dec 14 10:41:06 UTC 2022 x86_64 GNU/Linux
output=()
if [[ "$all" = "1" ]]; then
output+=("$KERNEL_NAME_VAL" "$NODENAME_VAL" "$KERNEL_RELEASE_VAL" "$KERNEL_VERSION_VAL" "$MACHINE_VAL")
# in help: except omit -p and -i if unknown.
# output+=($PROCESSOR_VAL $HARDWARE_PLATFORM_VAL)
output+=("$OPERATING_SYSTEM_VAL")
fi
if [[ "$kernel_name" = "1" ]]; then
output+=("$KERNEL_NAME_VAL")
fi
if [[ "$nodename" = "1" ]]; then
output+=("$NODENAME_VAL")
fi
if [[ "$kernel_release" = "1" ]]; then
output+=("$KERNEL_RELEASE_VAL")
fi
if [[ "$kernel_version" = "1" ]]; then
output+=("$KERNEL_VERSION_VAL")
fi
if [[ "$machine" = "1" ]]; then
output+=("$MACHINE_VAL")
fi
if [[ "$processor" = "1" ]]; then
output+=("$PROCESSOR_VAL")
fi
if [[ "$hardware_platform" = "1" ]]; then
output+=("$HARDWARE_PLATFORM_VAL")
fi
if [[ "$operating_system" = "1" ]]; then
output+=("$OPERATING_SYSTEM_VAL")
fi
echo "${output[@]}"

View file

@ -0,0 +1,25 @@
{ dhallPackages, dhallPackageToNix}:
# `dhallDirectoryToNix` is a utility function to take a directory of Dhall files
# and read them in as a Nix expression.
#
# This function is similar to `dhallToNix`, but takes a Nixpkgs Dhall package
# as input instead of raw Dhall code.
#
# Note that this uses "import from derivation" (IFD), meaning that Nix will
# perform a build during the evaluation phase if you use this
# `dhallDirectoryToNix` utility. It is not possible to use
# `dhallDirectoryToNix` in Nixpkgs, since the Nixpkgs Hydra doesn't allow IFD.
{ src
, # The file to import, relative to the src root directory
file ? "package.dhall"
}@args:
let
generatedPkg = dhallPackages.generateDhallDirectoryPackage args;
builtPkg = dhallPackages.callPackage generatedPkg { };
in
dhallPackageToNix builtPkg

View file

@ -0,0 +1,36 @@
# `dhallPackageToNix` is a utility function to take a Nixpkgs Dhall package
# (created with a function like `dhallPackages.buildDhallDirectoryPackage`)
# and read it in as a Nix expression.
#
# This function is similar to `dhallToNix`, but takes a Nixpkgs Dhall package
# as input instead of raw Dhall code.
#
# Note that this uses "import from derivation" (IFD), meaning that Nix will
# perform a build during the evaluation phase if you use this
# `dhallPackageToNix` utility. It is not possible to use `dhallPackageToNix`
# in Nixpkgs, since the Nixpkgs Hydra doesn't allow IFD.
{ stdenv, dhall-nix }:
dhallPackage:
let
drv = stdenv.mkDerivation {
name = "dhall-compiled-package.nix";
buildCommand = ''
# Dhall requires that the cache is writable, even if it is never written to.
# We copy the cache from the input package to the current directory and
# set the cache as writable.
cp -r "${dhallPackage}/.cache" ./
export XDG_CACHE_HOME=$PWD/.cache
chmod -R +w ./.cache
dhall-to-nix <<< "${dhallPackage}/binary.dhall" > $out
'';
nativeBuildInputs = [ dhall-nix ];
};
in
import drv

View file

@ -0,0 +1,38 @@
/* `dhallToNix` is a utility function to convert expressions in the Dhall
configuration language to their corresponding Nix expressions.
Example:
dhallToNix "{ foo = 1, bar = True }"
=> { foo = 1; bar = true; }
dhallToNix "λ(x : Bool) x == False"
=> x : x == false
dhallToNix "λ(x : Bool) x == False" false
=> true
See https://hackage.haskell.org/package/dhall-nix/docs/Dhall-Nix.html for
a longer tutorial
Note that this uses "import from derivation", meaning that Nix will perform
a build during the evaluation phase if you use this `dhallToNix` utility
*/
{ stdenv, dhall-nix, writeText }:
let
dhallToNix = code :
let
file = writeText "dhall-expression" code;
drv = stdenv.mkDerivation {
name = "dhall-compiled.nix";
buildCommand = ''
dhall-to-nix <<< "${file}" > $out
'';
buildInputs = [ dhall-nix ];
};
in
import drv;
in
dhallToNix

View file

@ -0,0 +1,7 @@
# Build support for D
Build utilities for the D language can be found in this directory.
### Current maintainers
- @TomaSajt
- @jtbx

View file

@ -0,0 +1,124 @@
{
lib,
stdenv,
fetchurl,
linkFarm,
dub,
ldc,
removeReferencesTo,
}:
# See https://nixos.org/manual/nixpkgs/unstable#dlang for more detailed usage information
{
# A lockfile generated by `dub-to-nix` from the source of the package.
# Can be either a path to the file, or an attrset already parsed with `lib.importJSON`.
dubLock,
# The build type to pass to `dub build` as a value for the `--build=` flag.
dubBuildType ? "release",
# The flags to pass to `dub build` and `dub test`.
dubFlags ? [ ],
# The flags to pass to `dub build`.
dubBuildFlags ? [ ],
# The flags to pass to `dub test`.
dubTestFlags ? [ ],
# The D compiler to be used by `dub`.
compiler ? ldc,
...
}@args:
let
makeDubDep =
{
pname,
version,
sha256,
}:
{
inherit pname version;
src = fetchurl {
name = "dub-${pname}-${version}.zip";
url = "mirror://dub/${pname}/${version}.zip";
inherit sha256;
};
};
lockJson = if lib.isPath dubLock then lib.importJSON dubLock else dubLock;
lockedDeps = lib.mapAttrsToList (
pname: { version, sha256 }: makeDubDep { inherit pname version sha256; }
) lockJson.dependencies;
  # A directory with multiple single-element registries.
  # One big directory with all the .zip files would lead to version parsing
  # errors when the name of a package is a prefix of another package's name.
dubRegistryBase = linkFarm "dub-registry-base" (
map (dep: {
name = "${dep.pname}/${dep.pname}-${dep.version}.zip";
path = dep.src;
}) lockedDeps
);
combinedFlags = "--skip-registry=all --compiler=${lib.getExe compiler} ${toString dubFlags}";
combinedBuildFlags = "${combinedFlags} --build=${dubBuildType} ${toString dubBuildFlags}";
combinedTestFlags = "${combinedFlags} ${toString dubTestFlags}";
in
stdenv.mkDerivation (
builtins.removeAttrs args [ "dubLock" ]
// {
strictDeps = args.strictDeps or true;
nativeBuildInputs = args.nativeBuildInputs or [ ] ++ [
dub
compiler
removeReferencesTo
];
configurePhase =
args.configurePhase or ''
runHook preConfigure
export DUB_HOME="$NIX_BUILD_TOP/.dub"
mkdir -p $DUB_HOME
# register dependencies
${lib.concatMapStringsSep "\n" (dep: ''
dub fetch ${dep.pname}@${dep.version} --cache=user --skip-registry=standard --registry=file://${dubRegistryBase}/${dep.pname}
'') lockedDeps}
runHook postConfigure
'';
buildPhase =
args.buildPhase or ''
runHook preBuild
dub build ${combinedBuildFlags}
runHook postBuild
'';
doCheck = args.doCheck or false;
checkPhase =
args.checkPhase or ''
runHook preCheck
dub test ${combinedTestFlags}
runHook postCheck
'';
preFixup = ''
${args.preFixup or ""}
find "$out" -type f -exec remove-references-to -t ${compiler} '{}' +
'';
disallowedReferences = [ compiler ];
meta = {
platforms = dub.meta.platforms;
} // args.meta or { };
}
)
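# A minimal call sketch (names and values hypothetical):
#   buildDubPackage {
#     pname = "hello-d";
#     version = "1.0.0";
#     src = ./.;
#     dubLock = ./dub-lock.json; # generated beforehand with dub-to-nix
#   }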

View file

@ -0,0 +1,5 @@
{ callPackage }:
{
buildDubPackage = callPackage ./builddubpackage { };
dub-to-nix = callPackage ./dub-to-nix { };
}

View file

@ -0,0 +1,19 @@
{
lib,
runCommand,
makeWrapper,
python3,
nix,
}:
runCommand "dub-to-nix"
{
nativeBuildInputs = [ makeWrapper ];
buildInputs = [ python3 ];
}
''
install -Dm755 ${./dub-to-nix.py} "$out/bin/dub-to-nix"
patchShebangs "$out/bin/dub-to-nix"
wrapProgram "$out/bin/dub-to-nix" \
--prefix PATH : ${lib.makeBinPath [ nix ]}
''

View file

@ -0,0 +1,39 @@
#!/usr/bin/env python3
import sys
import json
import os
import subprocess
def eprint(text: str):
print(text, file=sys.stderr)
if not os.path.exists("dub.selections.json"):
eprint("The file `dub.selections.json` does not exist in the current working directory")
eprint("run `dub upgrade --annotate` to generate it")
sys.exit(1)
with open("dub.selections.json") as f:
selectionsJson = json.load(f)
versionDict: dict[str, str] = selectionsJson["versions"]
for pname in versionDict:
version = versionDict[pname]
if version.startswith("~"):
eprint(f'Package "{pname}" has a branch-type version "{version}", which doesn\'t point to a fixed version')
eprint("You can resolve it by manually changing the required version to a fixed one inside `dub.selections.json`")
eprint("When packaging, you might need to create a patch for `dub.sdl` or `dub.json` to accept the changed version")
sys.exit(1)
lockedDependenciesDict: dict[str, dict[str, str]] = {}
for pname in versionDict:
version = versionDict[pname]
eprint(f"Fetching {pname}@{version}")
url = f"https://code.dlang.org/packages/{pname}/{version}.zip"
command = ["nix-prefetch-url", "--type", "sha256", url]
sha256 = subprocess.run(command, check=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.rstrip()
lockedDependenciesDict[pname] = {"version": version, "sha256": sha256}
print(json.dumps({"dependencies": lockedDependenciesDict}, indent=2))

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,40 @@
#!/usr/bin/env python3
# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488
import sys
import json
# If any of the keys below are equal to a certain value
# then we can delete it because it's the default value
SAFEDELS = {
"Size": 0,
"config": {
"ExposedPorts": None,
"MacAddress": "",
"NetworkDisabled": False,
"PortSpecs": None,
"VolumeDriver": ""
}
}
SAFEDELS["container_config"] = SAFEDELS["config"]
def makedet(j, safedels):
for k,v in safedels.items():
if k not in j:
continue
        if isinstance(v, dict):
makedet(j[k], v)
elif j[k] == v:
del j[k]
def main():
j = json.load(sys.stdin)
makedet(j, SAFEDELS)
json.dump(j, sys.stdout, sort_keys=True)
if __name__ == '__main__':
main()
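# Usage sketch (file name illustrative): this filter reads a layer's JSON on
# stdin and writes the normalized JSON on stdout, e.g.
#   python3 detjson.py < layer/json > layer/json.det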

View file

@ -0,0 +1,900 @@
# Examples of using the docker tools to build packages.
#
# This file defines several docker images. In order to use an image,
# build its derivation with `nix-build`, and then load the result with
# `docker load`. For example:
#
# $ nix-build '<nixpkgs>' -A dockerTools.examples.redis
# $ docker load < result
{ pkgs, buildImage, buildLayeredImage, fakeNss, pullImage, shadowSetup, buildImageWithNixDb, pkgsCross, streamNixShellImage }:
let
nixosLib = import ../../../nixos/lib {
# Experimental features need testing too, but there's no point in warning
# about it, so we enable the feature flag.
featureFlags.minimalModules = {};
};
evalMinimalConfig = module: nixosLib.evalModules { modules = [ module ]; };
in
rec {
# 1. basic example
bash = buildImage {
name = "bash";
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
paths = [ pkgs.bashInteractive ];
pathsToLink = [ "/bin" ];
};
};
# 2. service example, layered on another image
redis = buildImage {
name = "redis";
tag = "latest";
# for example's sake, we can layer redis on top of bash or debian
fromImage = bash;
# fromImage = debian;
copyToRoot = pkgs.buildEnv {
name = "image-root";
paths = [ pkgs.redis ];
pathsToLink = [ "/bin" ];
};
runAsRoot = ''
mkdir -p /data
'';
config = {
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
};
};
# 3. another service example
nginx = let
nginxPort = "80";
nginxConf = pkgs.writeText "nginx.conf" ''
user nobody nobody;
daemon off;
error_log /dev/stdout info;
pid /dev/null;
events {}
http {
access_log /dev/stdout;
server {
listen ${nginxPort};
index index.html;
location / {
root ${nginxWebRoot};
}
}
}
'';
nginxWebRoot = pkgs.writeTextDir "index.html" ''
<html><body><h1>Hello from NGINX</h1></body></html>
'';
in
buildLayeredImage {
name = "nginx-container";
tag = "latest";
contents = [
fakeNss
pkgs.nginx
];
extraCommands = ''
mkdir -p tmp/nginx_client_body
      # nginx still tries to read this directory even if the error_log
      # directive specifies another file :/
mkdir -p var/log/nginx
'';
config = {
Cmd = [ "nginx" "-c" nginxConf ];
ExposedPorts = {
"${nginxPort}/tcp" = {};
};
};
};
# 4. example of pulling an image. could be used as a base for other images
nixFromDockerHub = pullImage {
imageName = "nixos/nix";
imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
finalImageTag = "2.2.1";
finalImageName = "nix";
};
# Same example, but re-fetches every time the fetcher implementation changes.
# NOTE: Only use this for testing, or you'd be wasting a lot of time, network and space.
testNixFromDockerHub = pkgs.testers.invalidateFetcherByDrvHash pullImage {
imageName = "nixos/nix";
imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
finalImageTag = "2.2.1";
finalImageName = "nix";
};
# 5. example of multiple contents, emacs and vi happily coexisting
editors = buildImage {
name = "editors";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [
pkgs.coreutils
pkgs.bash
pkgs.emacs
pkgs.vim
pkgs.nano
];
};
};
# 6. nix example to play with the container nix store
# docker run -it --rm nix nix-store -qR $(nix-build '<nixpkgs>' -A nix)
nix = buildImageWithNixDb {
name = "nix";
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [
# nix-store uses cat program to display results as specified by
# the image env variable NIX_PAGER.
pkgs.coreutils
pkgs.nix
pkgs.bash
];
};
config = {
Env = [
"NIX_PAGER=cat"
# A user is required by nix
# https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
"USER=nobody"
];
};
};
  # 7. example of adding something on top of an image pulled by our
  # dockerTools chain.
onTopOfPulledImage = buildImage {
name = "onTopOfPulledImage";
tag = "latest";
fromImage = nixFromDockerHub;
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.hello ];
};
};
# 8. regression test for erroneous use of eval and string expansion.
# See issue #34779 and PR #40947 for details.
runAsRootExtraCommands = pkgs.dockerTools.buildImage {
name = "runAsRootExtraCommands";
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.coreutils ];
};
  # The parens here create problematic bash to embed and eval. If this were
  # *embedded* into the script (with Nix expansion), the initial quotes would
  # close the string and the following parens would be unexpected.
runAsRoot = ''echo "(runAsRoot)" > runAsRoot'';
extraCommands = ''echo "(extraCommand)" > extraCommands'';
};
  # 9. Ensure that setting created to "now" results in a date which
  # isn't the epoch + 1.
unstableDate = pkgs.dockerTools.buildImage {
name = "unstable-date";
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.coreutils ];
};
created = "now";
};
# 10. Create a layered image
layered-image = pkgs.dockerTools.buildLayeredImage {
name = "layered-image";
tag = "latest";
extraCommands = ''echo "(extraCommand)" > extraCommands'';
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
contents = [ pkgs.hello pkgs.bash pkgs.coreutils ];
};
# 11. Create an image on top of a layered image
layered-on-top = pkgs.dockerTools.buildImage {
name = "layered-on-top";
tag = "latest";
fromImage = layered-image;
extraCommands = ''
mkdir ./example-output
chmod 777 ./example-output
'';
config = {
Env = [ "PATH=${pkgs.coreutils}/bin/" ];
WorkingDir = "/example-output";
Cmd = [
"${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
];
};
};
# 12 Create a layered image on top of a layered image
layered-on-top-layered = pkgs.dockerTools.buildLayeredImage {
name = "layered-on-top-layered";
tag = "latest";
fromImage = layered-image;
extraCommands = ''
mkdir ./example-output
chmod 777 ./example-output
'';
config = {
Env = [ "PATH=${pkgs.coreutils}/bin/" ];
WorkingDir = "/example-output";
Cmd = [
"${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
];
};
};
# 13. example of running something as root on top of a parent image
# Regression test related to PR #52109
runAsRootParentImage = buildImage {
name = "runAsRootParentImage";
tag = "latest";
runAsRoot = "touch /example-file";
fromImage = bash;
};
  # 14. example of 3-layer images. This image is used to verify that the
  # order of layers is correct.
  # It allows validating that
  # - the layers of the parent are below
  # - the order of the parent's layers is preserved at image build time
  #   (this is why there are 3 images)
layersOrder = let
l1 = pkgs.dockerTools.buildImage {
name = "l1";
tag = "latest";
extraCommands = ''
mkdir -p tmp
echo layer1 > tmp/layer1
echo layer1 > tmp/layer2
echo layer1 > tmp/layer3
'';
};
l2 = pkgs.dockerTools.buildImage {
name = "l2";
fromImage = l1;
tag = "latest";
extraCommands = ''
mkdir -p tmp
echo layer2 > tmp/layer2
echo layer2 > tmp/layer3
'';
};
in pkgs.dockerTools.buildImage {
name = "l3";
fromImage = l2;
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.coreutils ];
};
extraCommands = ''
mkdir -p tmp
echo layer3 > tmp/layer3
'';
};
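  # The resulting order can be checked from a running container, e.g.:
  #   $ docker run --rm l3 cat /tmp/layer1 /tmp/layer2 /tmp/layer3
  # which should print layer1, layer2 and layer3 on successive lines.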
# 15. Environment variable inheritance.
  # A child image should inherit the parent's environment variables,
# optionally overriding them.
environmentVariablesParent = pkgs.dockerTools.buildImage {
name = "parent";
tag = "latest";
config = {
Env = [
"FROM_PARENT=true"
"LAST_LAYER=parent"
];
};
};
environmentVariables = pkgs.dockerTools.buildImage {
name = "child";
fromImage = environmentVariablesParent;
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.coreutils ];
};
config = {
Env = [
"FROM_CHILD=true"
"LAST_LAYER=child"
];
};
};
environmentVariablesLayered = pkgs.dockerTools.buildLayeredImage {
name = "child";
fromImage = environmentVariablesParent;
tag = "latest";
contents = [ pkgs.coreutils ];
config = {
Env = [
"FROM_CHILD=true"
"LAST_LAYER=child"
];
};
};
# 16. Create another layered image, for comparing layers with image 10.
another-layered-image = pkgs.dockerTools.buildLayeredImage {
name = "another-layered-image";
tag = "latest";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
};
# 17. Create a layered image with only 2 layers
two-layered-image = pkgs.dockerTools.buildLayeredImage {
name = "two-layered-image";
tag = "latest";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
contents = [ pkgs.bash pkgs.hello ];
maxLayers = 2;
};
# 18. Create a layered image with more packages than max layers.
# coreutils and hello are part of the same layer
bulk-layer = pkgs.dockerTools.buildLayeredImage {
name = "bulk-layer";
tag = "latest";
contents = with pkgs; [
coreutils hello
];
maxLayers = 2;
};
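  # With maxLayers = 2 the store paths above cannot each get their own layer;
  # the overflow is merged, which is why coreutils and hello share one.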
# 19. Create a layered image with a base image and more packages than max
# layers. coreutils and hello are part of the same layer
layered-bulk-layer = pkgs.dockerTools.buildLayeredImage {
name = "layered-bulk-layer";
tag = "latest";
fromImage = two-layered-image;
contents = with pkgs; [
coreutils hello
];
maxLayers = 4;
};
# 20. Create a "layered" image without nix store layers. This is not
# recommended, but can be useful for base images in rare cases.
no-store-paths = pkgs.dockerTools.buildLayeredImage {
name = "no-store-paths";
tag = "latest";
extraCommands = ''
# This removes sharing of busybox and is not recommended. We do this
# to make the example suitable as a test case with working binaries.
cp -r ${pkgs.pkgsStatic.busybox}/* .
# This is a "build" dependency that will not appear in the image
${pkgs.hello}/bin/hello
'';
};
nixLayered = pkgs.dockerTools.buildLayeredImageWithNixDb {
name = "nix-layered";
tag = "latest";
contents = [
# nix-store uses cat program to display results as specified by
# the image env variable NIX_PAGER.
pkgs.coreutils
pkgs.nix
pkgs.bash
];
config = {
Env = [
"NIX_PAGER=cat"
# A user is required by nix
# https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
"USER=nobody"
];
};
};
# 21. Support files in the store on buildLayeredImage
# See: https://github.com/NixOS/nixpkgs/pull/91084#issuecomment-653496223
filesInStore = pkgs.dockerTools.buildLayeredImageWithNixDb {
name = "file-in-store";
tag = "latest";
contents = [
pkgs.coreutils
pkgs.nix
(pkgs.writeScriptBin "myscript" ''
#!${pkgs.runtimeShell}
cat ${pkgs.writeText "somefile" "some data"}
'')
];
config = {
Cmd = [ "myscript" ];
# For some reason 'nix-store --verify' requires this environment variable
Env = [ "USER=root" ];
};
};
  # 22. Ensure that setting created to "now" results in a date which
  # isn't the epoch + 1 for layered images.
unstableDateLayered = pkgs.dockerTools.buildLayeredImage {
name = "unstable-date-layered";
tag = "latest";
contents = [ pkgs.coreutils ];
created = "now";
};
# 23. Ensure that layers are unpacked in the correct order before the
# runAsRoot script is executed.
layersUnpackOrder =
let
layerOnTopOf = parent: layerName:
pkgs.dockerTools.buildImage {
name = "layers-unpack-order-${layerName}";
tag = "latest";
fromImage = parent;
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ pkgs.coreutils ];
};
runAsRoot = ''
#!${pkgs.runtimeShell}
echo -n "${layerName}" >> /layer-order
'';
};
      # When the runAsRoot script is executed while building layer C, if layer B
      # is not unpacked on top of layer A, the contents of /layer-order will not
      # be "abc".
layerA = layerOnTopOf null "a";
layerB = layerOnTopOf layerA "b";
layerC = layerOnTopOf layerB "c";
in layerC;
bashUncompressed = pkgs.dockerTools.buildImage {
name = "bash-uncompressed";
tag = "latest";
compressor = "none";
# Not recommended. Use `buildEnv` between copy and packages to avoid file duplication.
copyToRoot = pkgs.bashInteractive;
};
bashZstdCompressed = pkgs.dockerTools.buildImage {
name = "bash-zstd";
tag = "latest";
compressor = "zstd";
# Not recommended. Use `buildEnv` between copy and packages to avoid file duplication.
copyToRoot = pkgs.bashInteractive;
};
# buildImage without explicit tag
bashNoTag = pkgs.dockerTools.buildImage {
name = "bash-no-tag";
# Not recommended. Use `buildEnv` between copy and packages to avoid file duplication.
copyToRoot = pkgs.bashInteractive;
};
# buildLayeredImage without explicit tag
bashNoTagLayered = pkgs.dockerTools.buildLayeredImage {
name = "bash-no-tag-layered";
contents = pkgs.bashInteractive;
};
# buildLayeredImage without compression
bashLayeredUncompressed = pkgs.dockerTools.buildLayeredImage {
name = "bash-layered-uncompressed";
tag = "latest";
compressor = "none";
contents = pkgs.bashInteractive;
};
# buildLayeredImage with zstd compression
bashLayeredZstdCompressed = pkgs.dockerTools.buildLayeredImage {
name = "bash-layered-zstd";
tag = "latest";
compressor = "zstd";
contents = pkgs.bashInteractive;
};
# streamLayeredImage without explicit tag
bashNoTagStreamLayered = pkgs.dockerTools.streamLayeredImage {
name = "bash-no-tag-stream-layered";
contents = pkgs.bashInteractive;
};
# buildLayeredImage with non-root user
bashLayeredWithUser =
let
nonRootShadowSetup = { user, uid, gid ? uid }: with pkgs; [
(
writeTextDir "etc/shadow" ''
root:!x:::::::
${user}:!:::::::
''
)
(
writeTextDir "etc/passwd" ''
root:x:0:0::/root:${runtimeShell}
${user}:x:${toString uid}:${toString gid}::/home/${user}:
''
)
(
writeTextDir "etc/group" ''
root:x:0:
${user}:x:${toString gid}:
''
)
(
writeTextDir "etc/gshadow" ''
root:x::
${user}:x::
''
)
];
in
pkgs.dockerTools.buildLayeredImage {
name = "bash-layered-with-user";
tag = "latest";
contents = [ pkgs.bash pkgs.coreutils ] ++ nonRootShadowSetup { uid = 999; user = "somebody"; };
};
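  # A sketch of running the image as the non-root user defined above:
  #   $ docker run --rm -u somebody bash-layered-with-user whoami
  #   somebody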
# basic example, with cross compilation
cross = let
# Cross compile for x86_64 if on aarch64
crossPkgs =
if pkgs.stdenv.hostPlatform.system == "aarch64-linux" then pkgsCross.gnu64
else pkgsCross.aarch64-multiplatform;
in crossPkgs.dockerTools.buildImage {
name = "hello-cross";
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-root";
pathsToLink = [ "/bin" ];
paths = [ crossPkgs.hello ];
};
};
# layered image where a store path is itself a symlink
layeredStoreSymlink =
let
target = pkgs.writeTextDir "dir/target" "Content doesn't matter.";
symlink = pkgs.runCommand "symlink" {} "ln -s ${target} $out";
in
pkgs.dockerTools.buildLayeredImage {
name = "layeredstoresymlink";
tag = "latest";
contents = [ pkgs.bash symlink ];
} // { passthru = { inherit symlink; }; };
# image with registry/ prefix
prefixedImage = pkgs.dockerTools.buildImage {
name = "registry-1.docker.io/image";
tag = "latest";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
};
# layered image with registry/ prefix
prefixedLayeredImage = pkgs.dockerTools.buildLayeredImage {
name = "registry-1.docker.io/layered-image";
tag = "latest";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
};
# layered image with files owned by a user other than root
layeredImageWithFakeRootCommands = pkgs.dockerTools.buildLayeredImage {
name = "layered-image-with-fake-root-commands";
tag = "latest";
contents = [
pkgs.pkgsStatic.busybox
];
fakeRootCommands = ''
mkdir -p ./home/alice
chown 1000 ./home/alice
ln -s ${pkgs.hello.overrideAttrs (o: {
# A unique `hello` to make sure that it isn't included via another mechanism by accident.
configureFlags = o.configureFlags or [] ++ [ " --program-prefix=layeredImageWithFakeRootCommands-" ];
doCheck = false;
})} ./hello
'';
};
# tarball consisting of both bash and redis images
mergedBashAndRedis = pkgs.dockerTools.mergeImages [
bash
redis
];
# tarball consisting of bash (without tag) and redis images
mergedBashNoTagAndRedis = pkgs.dockerTools.mergeImages [
bashNoTag
redis
];
# tarball consisting of bash and layered image with different owner of the
# /home/alice directory
mergedBashFakeRoot = pkgs.dockerTools.mergeImages [
bash
layeredImageWithFakeRootCommands
];
mergeVaryingCompressor = pkgs.dockerTools.mergeImages [
redis
bashUncompressed
bashZstdCompressed
];
helloOnRoot = pkgs.dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
contents = [
(pkgs.buildEnv {
name = "hello-root";
paths = [ pkgs.hello ];
})
];
config.Cmd = [ "hello" ];
};
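  # streamLayeredImage produces a script that writes the image tarball to
  # stdout, so it is loaded with something like:
  #   $ $(nix-build '<nixpkgs>' -A dockerTools.examples.helloOnRoot) | docker load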
helloOnRootNoStore = pkgs.dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
contents = [
(pkgs.buildEnv {
name = "hello-root";
paths = [ pkgs.hello ];
})
];
config.Cmd = [ "hello" ];
includeStorePaths = false;
};
helloOnRootNoStoreFakechroot = pkgs.dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
contents = [
(pkgs.buildEnv {
name = "hello-root";
paths = [ pkgs.hello ];
})
];
config.Cmd = [ "hello" ];
includeStorePaths = false;
enableFakechroot = true;
};
etc =
let
inherit (pkgs) lib;
nixosCore = (evalMinimalConfig ({ config, ... }: {
imports = [
pkgs.pkgsModule
../../../nixos/modules/system/etc/etc.nix
];
environment.etc."some-config-file" = {
text = ''
127.0.0.1 localhost
::1 localhost
'';
# For executables:
# mode = "0755";
};
}));
in pkgs.dockerTools.streamLayeredImage {
name = "etc";
tag = "latest";
enableFakechroot = true;
fakeRootCommands = ''
mkdir -p /etc
${nixosCore.config.system.build.etcActivationCommands}
'';
config.Cmd = pkgs.writeScript "etc-cmd" ''
#!${pkgs.busybox}/bin/sh
${pkgs.busybox}/bin/cat /etc/some-config-file
'';
};
# Example export of the bash image
exportBash = pkgs.dockerTools.exportImage { fromImage = bash; };
imageViaFakeChroot = pkgs.dockerTools.streamLayeredImage {
name = "image-via-fake-chroot";
tag = "latest";
config.Cmd = [ "hello" ];
enableFakechroot = true;
# Crucially, instead of a relative path, this creates /bin, which is
# intercepted by fakechroot.
# This functionality is not available on darwin as of 2021.
fakeRootCommands = ''
mkdir /bin
ln -s ${pkgs.hello}/bin/hello /bin/hello
'';
};
build-image-with-path = buildImage {
name = "build-image-with-path";
tag = "latest";
# Not recommended. Use `buildEnv` between copy and packages to avoid file duplication.
copyToRoot = [ pkgs.bashInteractive ./test-dummy ];
};
layered-image-with-path = pkgs.dockerTools.streamLayeredImage {
name = "layered-image-with-path";
tag = "latest";
contents = [ pkgs.bashInteractive ./test-dummy ];
};
build-image-with-architecture = buildImage {
name = "build-image-with-architecture";
tag = "latest";
architecture = "arm64";
# Not recommended. Use `buildEnv` between copy and packages to avoid file duplication.
copyToRoot = [ pkgs.bashInteractive ./test-dummy ];
};
layered-image-with-architecture = pkgs.dockerTools.streamLayeredImage {
name = "layered-image-with-architecture";
tag = "latest";
architecture = "arm64";
contents = [ pkgs.bashInteractive ./test-dummy ];
};
# ensure that caCertificates builds
image-with-certs = buildImage {
name = "image-with-certs";
tag = "latest";
copyToRoot = pkgs.buildEnv {
name = "image-with-certs-root";
paths = [
pkgs.coreutils
pkgs.dockerTools.caCertificates
];
};
config = {
};
};
nix-shell-basic = streamNixShellImage {
name = "nix-shell-basic";
tag = "latest";
drv = pkgs.hello;
};
nix-shell-hook = streamNixShellImage {
name = "nix-shell-hook";
tag = "latest";
drv = pkgs.mkShell {
shellHook = ''
echo "This is the shell hook!"
exit
'';
};
};
nix-shell-inputs = streamNixShellImage {
name = "nix-shell-inputs";
tag = "latest";
drv = pkgs.mkShell {
nativeBuildInputs = [
pkgs.hello
];
};
command = ''
hello
'';
};
nix-shell-pass-as-file = streamNixShellImage {
name = "nix-shell-pass-as-file";
tag = "latest";
drv = pkgs.mkShell {
str = "this is a string";
passAsFile = [ "str" ];
};
command = ''
cat "$strPath"
'';
};
nix-shell-run = streamNixShellImage {
name = "nix-shell-run";
tag = "latest";
drv = pkgs.mkShell {};
run = ''
case "$-" in
*i*) echo This shell is interactive ;;
*) echo This shell is not interactive ;;
esac
'';
};
nix-shell-command = streamNixShellImage {
name = "nix-shell-command";
tag = "latest";
drv = pkgs.mkShell {};
command = ''
case "$-" in
*i*) echo This shell is interactive ;;
*) echo This shell is not interactive ;;
esac
'';
};
nix-shell-writable-home = streamNixShellImage {
name = "nix-shell-writable-home";
tag = "latest";
drv = pkgs.mkShell {};
run = ''
if [[ "$HOME" != "$(eval "echo ~$(whoami)")" ]]; then
echo "\$HOME ($HOME) is not the same as ~\$(whoami) ($(eval "echo ~$(whoami)"))"
exit 1
fi
if ! touch $HOME/test-file; then
echo "home directory is not writable"
exit 1
fi
echo "home directory is writable"
'';
};
nix-shell-nonexistent-home = streamNixShellImage {
name = "nix-shell-nonexistent-home";
tag = "latest";
drv = pkgs.mkShell {};
homeDirectory = "/homeless-shelter";
run = ''
if [[ "$HOME" != "$(eval "echo ~$(whoami)")" ]]; then
echo "\$HOME ($HOME) is not the same as ~\$(whoami) ($(eval "echo ~$(whoami)"))"
exit 1
fi
      if [[ -e $HOME ]]; then
echo "home directory exists"
exit 1
fi
echo "home directory doesn't exist"
'';
};
nix-shell-build-derivation = streamNixShellImage {
name = "nix-shell-build-derivation";
tag = "latest";
drv = pkgs.hello;
run = ''
buildDerivation
$out/bin/hello
'';
};
}


@ -0,0 +1,173 @@
#! /usr/bin/env bash
set -e -o pipefail
os=
arch=
imageName=
imageTag=
imageDigest=
finalImageName=
finalImageTag=
hashType=$NIX_HASH_ALGO
hashFormat=$hashFormat
format=nix
usage(){
echo >&2 "syntax: nix-prefetch-docker [options] [IMAGE_NAME [IMAGE_TAG|IMAGE_DIGEST]]
Options:
--os os OS to fetch image for
  --arch arch               Arch to fetch image for
--image-name name Name of the image to fetch
--image-tag tag Image tag
--image-digest digest Image digest
--final-image-name name Desired name of the image
--final-image-tag tag Desired image tag
--json Output result in json format instead of nix
--quiet Only print the final result
"
exit 1
}
get_image_digest(){
local imageName=$1
local imageTag=$2
if test -z "$imageTag"; then
imageTag="latest"
fi
skopeo --override-os "${os}" --override-arch "${arch}" --insecure-policy --tmpdir=$TMPDIR inspect "docker://$imageName:$imageTag" | jq '.Digest' -r
}
get_name() {
local imageName=$1
local imageTag=$2
echo "docker-image-$(echo "$imageName:$imageTag" | tr '/:' '-').tar"
}
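# For example, `get_name nixos/nix 2.2.1` prints:
#   docker-image-nixos-nix-2.2.1.tar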
argi=0
argfun=""
for arg; do
if test -z "$argfun"; then
case $arg in
--os) argfun=set_os;;
--arch) argfun=set_arch;;
--image-name) argfun=set_imageName;;
--image-tag) argfun=set_imageTag;;
--image-digest) argfun=set_imageDigest;;
--final-image-name) argfun=set_finalImageName;;
--final-image-tag) argfun=set_finalImageTag;;
--quiet) QUIET=true;;
--json) format=json;;
--help) usage; exit;;
*)
: $((++argi))
case $argi in
1) imageName=$arg;;
2) [[ $arg == *"sha256"* ]] && imageDigest=$arg || imageTag=$arg;;
*) exit 1;;
esac
;;
esac
else
case $argfun in
set_*)
var=${argfun#set_}
eval $var=$arg
;;
esac
argfun=""
fi
done
if test -z "$imageName"; then
usage
fi
if test -z "$os"; then
os=linux
fi
if test -z "$arch"; then
arch=amd64
fi
if test -z "$hashType"; then
hashType=sha256
fi
if test -z "$hashFormat"; then
hashFormat=base32
fi
if test -z "$finalImageName"; then
finalImageName="$imageName"
fi
if test -z "$finalImageTag"; then
if test -z "$imageTag"; then
finalImageTag="latest"
else
finalImageTag="$imageTag"
fi
fi
if test -z "$imageDigest"; then
imageDigest=$(get_image_digest $imageName $imageTag)
fi
sourceUrl="docker://$imageName@$imageDigest"
tmpPath="$(mktemp -d "${TMPDIR:-/tmp}/skopeo-copy-tmp-XXXXXXXX")"
trap "rm -rf \"$tmpPath\"" EXIT
tmpFile="$tmpPath/$(get_name $finalImageName $finalImageTag)"
if test -z "$QUIET"; then
skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" >&2
else
skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" > /dev/null
fi
# Compute the hash.
imageHash=$(nix-hash --flat --type $hashType --base32 "$tmpFile")
# Add the downloaded file to the Nix store.
finalPath=$(nix-store --add-fixed "$hashType" "$tmpFile")
if test -z "$QUIET"; then
echo "-> ImageName: $imageName" >&2
echo "-> ImageDigest: $imageDigest" >&2
echo "-> FinalImageName: $finalImageName" >&2
echo "-> FinalImageTag: $finalImageTag" >&2
echo "-> ImagePath: $finalPath" >&2
echo "-> ImageHash: $imageHash" >&2
fi
if [ "$format" == "nix" ]; then
cat <<EOF
{
imageName = "$imageName";
imageDigest = "$imageDigest";
sha256 = "$imageHash";
finalImageName = "$finalImageName";
finalImageTag = "$finalImageTag";
}
EOF
else
cat <<EOF
{
"imageName": "$imageName",
"imageDigest": "$imageDigest",
"sha256": "$imageHash",
"finalImageName": "$finalImageName",
"finalImageTag": "$finalImageTag"
}
EOF
fi


@ -0,0 +1,25 @@
{ lib, stdenv, makeWrapper, nix, skopeo, jq }:
stdenv.mkDerivation {
name = "nix-prefetch-docker";
nativeBuildInputs = [ makeWrapper ];
dontUnpack = true;
installPhase = ''
install -vD ${./nix-prefetch-docker} $out/bin/$name;
wrapProgram $out/bin/$name \
--prefix PATH : ${lib.makeBinPath [ nix skopeo jq ]} \
--set HOME /homeless-shelter
'';
preferLocalBuild = true;
meta = with lib; {
description = "Script used to obtain source hashes for dockerTools.pullImage";
mainProgram = "nix-prefetch-docker";
maintainers = with maintainers; [ offline ];
platforms = platforms.unix;
};
}


@ -0,0 +1,398 @@
"""
This script generates a Docker image from a set of store paths. Uses
Docker Image Specification v1.2 as reference [1].
It expects a JSON file with the following properties and writes the
image as an uncompressed tarball to stdout:
* "architecture", "config", "os", "created", "repo_tag" correspond to
the fields with the same name on the image spec [2].
* "created" can be "now".
* "created" is also used as mtime for files added to the image.
* "uid", "gid", "uname", "gname" is the file ownership, for example,
0, 0, "root", "root".
* "store_layers" is a list of layers in ascending order, where each
layer is the list of store paths to include in that layer.
The main challenge for this script is to create the final image in a
streaming fashion, without dumping any intermediate data to disk,
for performance.
A docker image has each layer's contents archived as a separate tarball,
and they all later get enveloped into a single big tarball in a
content-addressed fashion. However, because of how the "tar" format works,
we have to know the name (which includes the checksum in our
case) and the size of the tarball before we can start adding it to the
outer tarball. We achieve that by creating the layer tarballs twice;
on the first iteration we calculate the file size and the checksum,
and on the second one we actually stream the contents. The 'add_layer_dir'
function does all this.
[1]: https://github.com/moby/moby/blob/master/image/spec/v1.2.md
[2]: https://github.com/moby/moby/blob/4fb59c20a4fb54f944fe170d0ff1d00eb4a24d6f/image/spec/v1.2.md#image-json-field-descriptions
""" # noqa: E501
import io
import os
import re
import sys
import json
import hashlib
import pathlib
import tarfile
import itertools
import threading
from datetime import datetime, timezone
from collections import namedtuple
def archive_paths_to(obj, paths, mtime, uid, gid, uname, gname):
"""
Writes the given store paths as a tar file to the given stream.
obj: Stream to write to. Should have a 'write' method.
paths: List of store paths.
"""
    # gettarinfo makes the paths relative; this makes them
    # absolute again
def append_root(ti):
ti.name = "/" + ti.name
return ti
def apply_filters(ti):
ti.mtime = mtime
ti.uid = uid
ti.gid = gid
ti.uname = uname
ti.gname = gname
return ti
def nix_root(ti):
ti.mode = 0o0755 # rwxr-xr-x
return ti
def dir(path):
ti = tarfile.TarInfo(path)
ti.type = tarfile.DIRTYPE
return ti
with tarfile.open(fileobj=obj, mode="w|") as tar:
# To be consistent with the docker utilities, we need to have
# these directories first when building layer tarballs.
tar.addfile(apply_filters(nix_root(dir("/nix"))))
tar.addfile(apply_filters(nix_root(dir("/nix/store"))))
for path in paths:
path = pathlib.Path(path)
if path.is_symlink():
files = [path]
else:
files = itertools.chain([path], path.rglob("*"))
for filename in sorted(files):
ti = append_root(tar.gettarinfo(filename))
# copy hardlinks as regular files
if ti.islnk():
ti.type = tarfile.REGTYPE
ti.linkname = ""
ti.size = filename.stat().st_size
ti = apply_filters(ti)
if ti.isfile():
with open(filename, "rb") as f:
tar.addfile(ti, f)
else:
tar.addfile(ti)
class ExtractChecksum:
"""
A writable stream which only calculates the final file size and
sha256sum, while discarding the actual contents.
"""
def __init__(self):
self._digest = hashlib.sha256()
self._size = 0
def write(self, data):
self._digest.update(data)
self._size += len(data)
def extract(self):
"""
Returns: Hex-encoded sha256sum and size as a tuple.
"""
return (self._digest.hexdigest(), self._size)
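# ExtractChecksum is the dry-run pass used by 'add_layer_dir' below:
#   ec = ExtractChecksum()
#   archive_paths_to(ec, paths, mtime, uid, gid, uname, gname)
#   (checksum, size) = ec.extract()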
FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
# Some metadata for a layer
LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])
def load_from_image(from_image_str):
"""
Loads the given base image, if any.
from_image_str: Path to the base image archive.
Returns: A 'FromImage' object with references to the loaded base image,
or 'None' if no base image was provided.
"""
if from_image_str is None:
return None
base_tar = tarfile.open(from_image_str)
manifest_json_tarinfo = base_tar.getmember("manifest.json")
with base_tar.extractfile(manifest_json_tarinfo) as f:
manifest_json = json.load(f)
image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
with base_tar.extractfile(image_json_tarinfo) as f:
image_json = json.load(f)
return FromImage(base_tar, manifest_json, image_json)
def add_base_layers(tar, from_image):
"""
Adds the layers from the given base image to the final image.
tar: 'tarfile.TarFile' object for new layers to be added to.
from_image: 'FromImage' object with references to the loaded base image.
"""
if from_image is None:
print("No 'fromImage' provided", file=sys.stderr)
return []
layers = from_image.manifest_json[0]["Layers"]
checksums = from_image.image_json["rootfs"]["diff_ids"]
layers_checksums = zip(layers, checksums)
for num, (layer, checksum) in enumerate(layers_checksums, start=1):
layer_tarinfo = from_image.tar.getmember(layer)
checksum = re.sub(r"^sha256:", "", checksum)
tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
path = layer_tarinfo.path
size = layer_tarinfo.size
print("Adding base layer", num, "from", path, file=sys.stderr)
yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])
from_image.tar.close()
def overlay_base_config(from_image, final_config):
"""
Overlays the final image 'config' JSON on top of selected defaults from the
base image 'config' JSON.
from_image: 'FromImage' object with references to the loaded base image.
final_config: 'dict' object of the final image 'config' JSON.
"""
if from_image is None:
return final_config
base_config = from_image.image_json["config"]
# Preserve environment from base image
final_env = base_config.get("Env", []) + final_config.get("Env", [])
if final_env:
# Resolve duplicates (last one wins) and format back as list
resolved_env = {entry.split("=", 1)[0]: entry for entry in final_env}
final_config["Env"] = list(resolved_env.values())
return final_config
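# For example, a base Env of ["FROM_PARENT=true", "LAST_LAYER=parent"]
# overlaid with a final Env of ["LAST_LAYER=child"] resolves to
# ["FROM_PARENT=true", "LAST_LAYER=child"]; the final config wins on
# duplicate names.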
def add_layer_dir(tar, paths, store_dir, mtime, uid, gid, uname, gname):
"""
Appends given store paths to a TarFile object as a new layer.
tar: 'tarfile.TarFile' object for the new layer to be added to.
paths: List of store paths.
store_dir: the root directory of the nix store
mtime: 'mtime' of the added files and the layer tarball.
Should be an integer representing a POSIX time.
Returns: A 'LayerInfo' object containing some metadata of
the layer added.
"""
invalid_paths = [i for i in paths if not i.startswith(store_dir)]
assert len(invalid_paths) == 0, \
f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
# First, calculate the tarball checksum and the size.
extract_checksum = ExtractChecksum()
archive_paths_to(
extract_checksum,
paths,
mtime, uid, gid, uname, gname
)
(checksum, size) = extract_checksum.extract()
path = f"{checksum}/layer.tar"
layer_tarinfo = tarfile.TarInfo(path)
layer_tarinfo.size = size
layer_tarinfo.mtime = mtime
# Then actually stream the contents to the outer tarball.
read_fd, write_fd = os.pipe()
with open(read_fd, "rb") as read, open(write_fd, "wb") as write:
def producer():
archive_paths_to(
write,
paths,
mtime, uid, gid, uname, gname
)
write.close()
        # Closing the write end of the pipe signals EOF to the read end,
        # so we don't need to wait until this thread is finished.
        #
        # Any exception from the thread will get printed by the default
        # exception handler, and the 'addfile' call will fail since it
        # won't be able to read the required amount of bytes.
threading.Thread(target=producer).start()
tar.addfile(layer_tarinfo, read)
return LayerInfo(size=size, checksum=checksum, path=path, paths=paths)
def add_customisation_layer(target_tar, customisation_layer, mtime):
"""
    Adds the customisation layer as a new layer. This layer is structured
    differently; the given store path has the 'layer.tar' and corresponding
    sha256sum ready.
    target_tar: 'tarfile.TarFile' object for the new layer to be added to.
customisation_layer: Path containing the layer archive.
mtime: 'mtime' of the added layer tarball.
"""
checksum_path = os.path.join(customisation_layer, "checksum")
with open(checksum_path) as f:
checksum = f.read().strip()
    assert len(checksum) == 64, f"Invalid sha256 at {checksum_path}."
layer_path = os.path.join(customisation_layer, "layer.tar")
path = f"{checksum}/layer.tar"
tarinfo = target_tar.gettarinfo(layer_path)
tarinfo.name = path
tarinfo.mtime = mtime
with open(layer_path, "rb") as f:
target_tar.addfile(tarinfo, f)
return LayerInfo(
size=None,
checksum=checksum,
path=path,
paths=[customisation_layer]
)
def add_bytes(tar, path, content, mtime):
"""
Adds a file to the tarball with given path and contents.
tar: 'tarfile.TarFile' object.
path: Path of the file as a string.
content: Contents of the file.
mtime: 'mtime' of the file. Should be an integer representing a POSIX time.
"""
assert type(content) is bytes
ti = tarfile.TarInfo(path)
ti.size = len(content)
ti.mtime = mtime
tar.addfile(ti, io.BytesIO(content))
def main():
with open(sys.argv[1], "r") as f:
conf = json.load(f)
created = (
datetime.now(tz=timezone.utc)
if conf["created"] == "now"
else datetime.fromisoformat(conf["created"])
)
mtime = int(created.timestamp())
uid = int(conf["uid"])
gid = int(conf["gid"])
uname = conf["uname"]
gname = conf["gname"]
store_dir = conf["store_dir"]
from_image = load_from_image(conf["from_image"])
with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
layers = []
layers.extend(add_base_layers(tar, from_image))
start = len(layers) + 1
for num, store_layer in enumerate(conf["store_layers"], start=start):
print("Creating layer", num, "from paths:", store_layer,
file=sys.stderr)
info = add_layer_dir(tar, store_layer, store_dir,
mtime, uid, gid, uname, gname)
layers.append(info)
print("Creating layer", len(layers) + 1, "with customisation...",
file=sys.stderr)
layers.append(
add_customisation_layer(
tar,
conf["customisation_layer"],
mtime=mtime
)
)
print("Adding manifests...", file=sys.stderr)
image_json = {
"created": datetime.isoformat(created),
"architecture": conf["architecture"],
"os": "linux",
"config": overlay_base_config(from_image, conf["config"]),
"rootfs": {
"diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
"type": "layers",
},
"history": [
{
"created": datetime.isoformat(created),
"comment": f"store paths: {layer.paths}"
}
for layer in layers
],
}
image_json = json.dumps(image_json, indent=4).encode("utf-8")
image_json_checksum = hashlib.sha256(image_json).hexdigest()
image_json_path = f"{image_json_checksum}.json"
add_bytes(tar, image_json_path, image_json, mtime=mtime)
manifest_json = [
{
"Config": image_json_path,
"RepoTags": [conf["repo_tag"]],
"Layers": [layer.path for layer in layers],
}
]
manifest_json = json.dumps(manifest_json, indent=4).encode("utf-8")
add_bytes(tar, "manifest.json", manifest_json, mtime=mtime)
print("Done.", file=sys.stderr)
if __name__ == "__main__":
main()


@ -0,0 +1,24 @@
package main
import (
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/docker/pkg/tarsum"
)
func main() {
ts, err := tarsum.NewTarSum(os.Stdin, true, tarsum.Version1)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if _, err = io.Copy(ioutil.Discard, ts); err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(ts.Sum(nil))
}
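// Example usage (a sketch): the program reads a tar stream on stdin and
// prints its tarsum, e.g.
//
//	$ tarsum < layer.tar
//	tarsum.v1+sha256:<hex digest>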


@ -0,0 +1,43 @@
{ stdenv, go, docker, nixosTests }:
stdenv.mkDerivation {
name = "tarsum";
nativeBuildInputs = [ go ];
disallowedReferences = [ go ];
dontUnpack = true;
CGO_ENABLED = 0;
GOFLAGS = "-trimpath";
GO111MODULE = "off";
buildPhase = ''
runHook preBuild
mkdir tarsum
cd tarsum
cp ${./tarsum.go} tarsum.go
export GOPATH=$(pwd)
export GOCACHE="$TMPDIR/go-cache"
mkdir -p src/github.com/docker/docker/pkg
ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
go build
runHook postBuild
'';
installPhase = ''
runHook preInstall
mkdir -p $out/bin
cp tarsum $out/bin/
runHook postInstall
'';
passthru = {
tests = {
dockerTools = nixosTests.docker-tools;
};
};
meta.platforms = go.meta.platforms;
meta.mainProgram = "tarsum";
}


@ -0,0 +1 @@
Hello there!


@ -0,0 +1,48 @@
{ buildDotnetModule, emptyDirectory, mkNugetDeps, dotnet-sdk }:
{ pname
, version
# Name of the nuget package to install, if different from pname
, nugetName ? pname
# Hash of the nuget package to install, will be given on first build
, nugetSha256 ? ""
# Additional nuget deps needed by the tool package
, nugetDeps ? (_: [])
# Executables to wrap into `$out/bin`, same as in `buildDotnetModule`, but with
# a default of `pname` instead of null, to avoid auto-wrapping everything
, executables ? pname
# The dotnet runtime to use, dotnet tools need a full SDK to function
, dotnet-runtime ? dotnet-sdk
, ...
} @ args:
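# A minimal usage sketch (package name and hash are hypothetical):
#   buildDotnetGlobalTool {
#     pname = "dotnet-ef";
#     version = "7.0.0";
#     nugetSha256 = "sha256-...";
#   }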
buildDotnetModule (args // {
inherit pname version dotnet-runtime executables;
src = emptyDirectory;
nugetDeps = mkNugetDeps {
name = pname;
nugetDeps = { fetchNuGet }: [
(fetchNuGet { pname = nugetName; inherit version; sha256 = nugetSha256; })
] ++ (nugetDeps fetchNuGet);
};
projectFile = "";
useDotnetFromEnv = true;
dontBuild = true;
installPhase = ''
runHook preInstall
dotnet tool install --tool-path $out/lib/${pname} ${nugetName}
# remove files that contain nix store paths to temp nuget sources we made
find $out -name 'project.assets.json' -delete
find $out -name '.nupkg.metadata' -delete
runHook postInstall
'';
})


@ -0,0 +1,323 @@
{ lib
, stdenvNoCC
, callPackage
, writeShellScript
, srcOnly
, linkFarmFromDrvs
, symlinkJoin
, makeWrapper
, dotnetCorePackages
, mkNugetSource
, mkNugetDeps
, nuget-to-nix
, cacert
, coreutils
, runtimeShellPackage
}:
{ name ? "${args.pname}-${args.version}"
, pname ? name
, enableParallelBuilding ? true
, doCheck ? false
# Flags to pass to `makeWrapper`. This is done to avoid double wrapping.
, makeWrapperArgs ? [ ]
# Flags to pass to `dotnet restore`.
, dotnetRestoreFlags ? [ ]
# Flags to pass to `dotnet build`.
, dotnetBuildFlags ? [ ]
# Flags to pass to `dotnet test`, if running tests is enabled.
, dotnetTestFlags ? [ ]
# Flags to pass to `dotnet install`.
, dotnetInstallFlags ? [ ]
# Flags to pass to `dotnet pack`.
, dotnetPackFlags ? [ ]
# Flags to pass to dotnet in all phases.
, dotnetFlags ? [ ]
# The path to publish the project to. When unset, the directory "$out/lib/$pname" is used.
, installPath ? null
# The binaries that should get installed to `$out/bin`, relative to `$installPath/`. These get wrapped accordingly.
# Unfortunately, dotnet has no method for doing this automatically.
# If unset, all executables in the project's root will get installed. This may cause bloat!
, executables ? null
# Packs a project as a `nupkg`, and installs it to `$out/share`. If set to `true`, the derivation can be used as a dependency for another dotnet project by adding it to `projectReferences`.
, packNupkg ? false
# The package's project file, which contains instructions on how to compile it. This can be an array of multiple project files as well.
, projectFile ? null
# The NuGet dependency file. This locks all NuGet dependency versions, as otherwise they cannot be deterministically fetched.
# This can be generated by running the `passthru.fetch-deps` script.
, nugetDeps ? null
# A list of derivations containing nupkg packages for local project references.
# Referenced derivations can be built with `buildDotnetModule` with `packNupkg=true` flag.
# Since we are sharing them as nugets they must be added to csproj/fsproj files as `PackageReference` as well.
# For example, your project has a local dependency:
# <ProjectReference Include="../foo/bar.fsproj" />
# To enable discovery through `projectReferences` you would need to add a line:
# <ProjectReference Include="../foo/bar.fsproj" />
# <PackageReference Include="bar" Version="*" Condition=" '$(ContinuousIntegrationBuild)'=='true' "/>
, projectReferences ? [ ]
# Libraries that need to be available at runtime should be passed through this.
# These get wrapped into `LD_LIBRARY_PATH`.
, runtimeDeps ? [ ]
# The dotnet runtime ID. If null, fetch-deps will gather dependencies for all
# platforms in meta.platforms which are supported by the sdk.
, runtimeId ? null
# Tests to disable. This gets passed to `dotnet test --filter "FullyQualifiedName!={}"`, to ensure compatibility with all frameworks.
# See https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-test#filter-option-details for more details.
, disabledTests ? [ ]
# The project file to run unit tests against. This is usually referenced in the regular project file, but sometimes it needs to be manually set.
# It gets restored and built, but not installed. You may need to regenerate your nuget lockfile after setting this.
, testProjectFile ? ""
# The type of build to perform. This is passed to `dotnet` with the `--configuration` flag. Possible values are `Release`, `Debug`, etc.
, buildType ? "Release"
# If set to true, builds the application as self-contained, removing the runtime dependency on dotnet
, selfContainedBuild ? false
# Whether to use an alternative wrapper, that executes the application DLL using the dotnet runtime from the user environment. `dotnet-runtime` is provided as a default in case no .NET is installed
# This is useful for .NET tools and applications that may need to run under different .NET runtimes
, useDotnetFromEnv ? false
# Whether to explicitly enable UseAppHost when building. This is redundant if useDotnetFromEnv is enabled
, useAppHost ? true
# The dotnet SDK to use.
, dotnet-sdk ? dotnetCorePackages.sdk_6_0
# The dotnet runtime to use.
, dotnet-runtime ? dotnetCorePackages.runtime_6_0
# The dotnet SDK to run tests against. This can differ from the SDK compiled against.
, dotnet-test-sdk ? dotnet-sdk
, ...
} @ args:
let
platforms =
if args ? meta.platforms
then lib.intersectLists args.meta.platforms dotnet-sdk.meta.platforms
else dotnet-sdk.meta.platforms;
inherit (callPackage ./hooks {
inherit dotnet-sdk dotnet-test-sdk disabledTests nuget-source dotnet-runtime runtimeDeps buildType;
runtimeId =
if runtimeId != null
then runtimeId
else dotnetCorePackages.systemToDotnetRid stdenvNoCC.targetPlatform.system;
}) dotnetConfigureHook dotnetBuildHook dotnetCheckHook dotnetInstallHook dotnetFixupHook;
localDeps =
if (projectReferences != [ ])
then linkFarmFromDrvs "${name}-project-references" projectReferences
else null;
_nugetDeps =
if (nugetDeps != null) then
if lib.isDerivation nugetDeps
then nugetDeps
else mkNugetDeps {
inherit name;
sourceFile = nugetDeps;
}
    else throw "Defining the `nugetDeps` attribute is required in order to lock the NuGet dependencies. This file can be generated by running the `passthru.fetch-deps` script.";
# contains the actual package dependencies
dependenciesSource = mkNugetSource {
name = "${name}-dependencies-source";
description = "A Nuget source with the dependencies for ${name}";
deps = [ _nugetDeps ] ++ lib.optional (localDeps != null) localDeps;
};
# this contains all the nuget packages that are implicitly referenced by the dotnet
# build system. having them as separate deps allows us to avoid having to regenerate
  # a package's dependencies when the dotnet-sdk version changes
sdkDeps = lib.lists.flatten [ dotnet-sdk.packages ];
sdkSource = let
version = dotnet-sdk.version or (lib.concatStringsSep "-" dotnet-sdk.versions);
in mkNugetSource {
name = "dotnet-sdk-${version}-source";
deps = sdkDeps;
};
nuget-source = symlinkJoin {
name = "${name}-nuget-source";
paths = [ dependenciesSource sdkSource ];
};
nugetDepsFile = _nugetDeps.sourceFile;
in
stdenvNoCC.mkDerivation (args // {
nativeBuildInputs = args.nativeBuildInputs or [ ] ++ [
dotnetConfigureHook
dotnetBuildHook
dotnetCheckHook
dotnetInstallHook
dotnetFixupHook
cacert
makeWrapper
dotnet-sdk
];
# Parse the version attr into a format acceptable for the Version msbuild property
# The actual version attr is saved in InformationalVersion, which accepts an arbitrary string
versionForDotnet = if !(lib.hasAttr "version" args) || args.version == null
then null else let
components = lib.pipe args.version [
lib.splitVersion
(lib.filter (x: (lib.strings.match "[0-9]+" x) != null))
(lib.filter (x: (lib.toIntBase10 x) < 65535)) # one version component in dotnet has to fit in 16 bits
];
in if (lib.length components) == 0
then null
else lib.concatStringsSep "." ((lib.take 4 components)
++ (if (lib.length components) < 4
then lib.replicate (4 - (lib.length components)) "0"
else [ ]));
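  # For example, given the logic above, version "1.2.3-beta.4" yields a
  # Version of "1.2.3.4" and "1.2" is padded to "1.2.0.0", while the
  # original string is kept verbatim in InformationalVersion.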
makeWrapperArgs = args.makeWrapperArgs or [ ] ++ [
"--prefix LD_LIBRARY_PATH : ${dotnet-sdk.icu}/lib"
];
# Stripping breaks the executable
dontStrip = args.dontStrip or true;
  # gappsWrapperArgs gets included when wrapping for dotnet, so as to avoid double wrapping
dontWrapGApps = args.dontWrapGApps or true;
inherit selfContainedBuild useAppHost useDotnetFromEnv;
passthru = {
inherit nuget-source;
} // lib.optionalAttrs (!lib.isDerivation nugetDeps) {
fetch-deps =
let
flags = dotnetFlags ++ dotnetRestoreFlags;
runtimeIds =
if runtimeId != null
then [ runtimeId ]
else map (system: dotnetCorePackages.systemToDotnetRid system) platforms;
defaultDepsFile =
# Wire in the nugetDeps file such that running the script with no args
            # runs it against the correct deps file by default.
# Note that toString is necessary here as it results in the path at
# eval time (i.e. to the file in your local Nixpkgs checkout) rather
# than the Nix store path of the path after it's been imported.
if lib.isPath nugetDepsFile && !lib.hasPrefix "${builtins.storeDir}/" (toString nugetDepsFile)
then toString nugetDepsFile
else ''$(mktemp -t "${pname}-deps-XXXXXX.nix")'';
in
writeShellScript "fetch-${pname}-deps" ''
set -euo pipefail
export PATH="${lib.makeBinPath [ coreutils runtimeShellPackage dotnet-sdk (nuget-to-nix.override { inherit dotnet-sdk; }) ]}"
for arg in "$@"; do
case "$arg" in
--keep-sources|-k)
keepSources=1
shift
;;
--help|-h)
echo "usage: $0 [--keep-sources] [--help] <output path>"
echo " <output path> The path to write the lockfile to. A temporary file is used if this is not set"
echo " --keep-sources Dont remove temporary directories upon exit, useful for debugging"
echo " --help Show this help message"
exit
;;
esac
done
if [[ ''${TMPDIR:-} == /run/user/* ]]; then
# /run/user is usually a tmpfs in RAM, which may be too small
# to store all downloaded dotnet packages
unset TMPDIR
fi
export tmp=$(mktemp -td "deps-${pname}-XXXXXX")
HOME=$tmp/home
exitTrap() {
test -n "''${ranTrap-}" && return
ranTrap=1
if test -n "''${keepSources-}"; then
echo -e "Path to the source: $tmp/src\nPath to the fake home: $tmp/home"
else
rm -rf "$tmp"
fi
              # Since mktemp is used this will be empty if the script didn't successfully complete
if ! test -s "$depsFile"; then
rm -rf "$depsFile"
fi
}
trap exitTrap EXIT INT TERM
dotnetRestore() {
local -r project="''${1-}"
local -r rid="$2"
dotnet restore ''${project-} \
-p:ContinuousIntegrationBuild=true \
-p:Deterministic=true \
--packages "$tmp/nuget_pkgs" \
--runtime "$rid" \
--no-cache \
--force \
${lib.optionalString (!enableParallelBuilding) "--disable-parallel"} \
${lib.optionalString (flags != []) (toString flags)}
}
declare -a projectFiles=( ${toString (lib.toList projectFile)} )
declare -a testProjectFiles=( ${toString (lib.toList testProjectFile)} )
export DOTNET_NOLOGO=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
depsFile=$(realpath "''${1:-${defaultDepsFile}}")
echo Will write lockfile to "$depsFile"
mkdir -p "$tmp/nuget_pkgs"
storeSrc="${srcOnly args}"
src=$tmp/src
cp -rT "$storeSrc" "$src"
chmod -R +w "$src"
cd "$src"
echo "Restoring project..."
${dotnet-sdk}/bin/dotnet tool restore
cp -r $HOME/.nuget/packages/* $tmp/nuget_pkgs || true
for rid in "${lib.concatStringsSep "\" \"" runtimeIds}"; do
(( ''${#projectFiles[@]} == 0 )) && dotnetRestore "" "$rid"
for project in ''${projectFiles[@]-} ''${testProjectFiles[@]-}; do
dotnetRestore "$project" "$rid"
done
done
            # Second copy makes sure packages restored by e.g. paket are included
cp -r $HOME/.nuget/packages/* $tmp/nuget_pkgs || true
echo "Succesfully restored project"
echo "Writing lockfile..."
excluded_sources="${lib.concatStringsSep " " sdkDeps}"
for excluded_source in ''${excluded_sources[@]}; do
ls "$excluded_source" >> "$tmp/excluded_list"
done
tmpFile="$tmp"/deps.nix
echo -e "# This file was automatically generated by passthru.fetch-deps.\n# Please dont edit it manually, your changes might get overwritten!\n" > "$tmpFile"
nuget-to-nix "$tmp/nuget_pkgs" "$tmp/excluded_list" >> "$tmpFile"
mv "$tmpFile" "$depsFile"
echo "Succesfully wrote lockfile to $depsFile"
'';
} // args.passthru or { };
meta = (args.meta or { }) // { inherit platforms; };
}
# ICU tries to unconditionally load files from /usr/share/icu on Darwin, which makes builds fail
# in the sandbox, so disable ICU on Darwin. This, as far as I know, shouldn't cause any built packages
# to behave differently, just the dotnet build tool.
// lib.optionalAttrs stdenvNoCC.isDarwin { DOTNET_SYSTEM_GLOBALIZATION_INVARIANT = 1; })


@ -0,0 +1,89 @@
{ lib
, stdenv
, which
, coreutils
, zlib
, openssl
, callPackage
, makeSetupHook
, makeWrapper
, dotnet-sdk
, dotnet-test-sdk
, disabledTests
, nuget-source
, dotnet-runtime
, runtimeDeps
, buildType
, runtimeId
}:
assert (builtins.isString runtimeId);
let
libraryPath = lib.makeLibraryPath runtimeDeps;
in
{
dotnetConfigureHook = callPackage ({ }:
makeSetupHook {
name = "dotnet-configure-hook";
propagatedBuildInputs = [ dotnet-sdk nuget-source ];
substitutions = {
nugetSource = nuget-source;
dynamicLinker = "${stdenv.cc}/nix-support/dynamic-linker";
libPath = lib.makeLibraryPath [
stdenv.cc.cc.lib
stdenv.cc.libc
dotnet-sdk.passthru.icu
zlib
openssl
];
inherit runtimeId;
};
} ./dotnet-configure-hook.sh) { };
dotnetBuildHook = callPackage ({ }:
makeSetupHook {
name = "dotnet-build-hook";
propagatedBuildInputs = [ dotnet-sdk ];
substitutions = {
inherit buildType runtimeId;
};
} ./dotnet-build-hook.sh) { };
dotnetCheckHook = callPackage ({ }:
makeSetupHook {
name = "dotnet-check-hook";
propagatedBuildInputs = [ dotnet-test-sdk ];
substitutions = {
inherit buildType runtimeId libraryPath;
disabledTests = lib.optionalString (disabledTests != [])
(let
escapedNames = lib.lists.map (n: lib.replaceStrings [","] ["%2C"] n) disabledTests;
filters = lib.lists.map (n: "FullyQualifiedName!=${n}") escapedNames;
in
"${lib.concatStringsSep "&" filters}");
};
} ./dotnet-check-hook.sh) { };
dotnetInstallHook = callPackage ({ }:
makeSetupHook {
name = "dotnet-install-hook";
propagatedBuildInputs = [ dotnet-sdk ];
substitutions = {
inherit buildType runtimeId;
};
} ./dotnet-install-hook.sh) { };
dotnetFixupHook = callPackage ({ }:
makeSetupHook {
name = "dotnet-fixup-hook";
propagatedBuildInputs = [ dotnet-runtime ];
substitutions = {
dotnetRuntime = dotnet-runtime;
runtimeDeps = libraryPath;
shell = stdenv.shell;
which = "${which}/bin/which";
dirname = "${coreutils}/bin/dirname";
realpath = "${coreutils}/bin/realpath";
};
} ./dotnet-fixup-hook.sh) { };
}

View file

@ -0,0 +1,70 @@
# inherit arguments from derivation
dotnetBuildFlags=( ${dotnetBuildFlags[@]-} )
dotnetBuildHook() {
echo "Executing dotnetBuildHook"
runHook preBuild
if [ "${enableParallelBuilding-}" ]; then
local -r maxCpuFlag="$NIX_BUILD_CORES"
local -r parallelBuildFlag="true"
else
local -r maxCpuFlag="1"
local -r parallelBuildFlag="false"
fi
if [ "${selfContainedBuild-}" ]; then
dotnetBuildFlags+=("-p:SelfContained=true")
else
dotnetBuildFlags+=("-p:SelfContained=false")
fi
if [ "${useAppHost-}" ]; then
dotnetBuildFlags+=("-p:UseAppHost=true")
fi
local versionFlags=()
if [ "${version-}" ]; then
versionFlags+=("-p:InformationalVersion=${version-}")
fi
if [ "${versionForDotnet-}" ]; then
versionFlags+=("-p:Version=${versionForDotnet-}")
fi
dotnetBuild() {
local -r project="${1-}"
runtimeIdFlags=()
if [[ "$project" == *.csproj ]] || [ "${selfContainedBuild-}" ]; then
runtimeIdFlags+=("--runtime @runtimeId@")
fi
env dotnet build ${project-} \
-maxcpucount:$maxCpuFlag \
-p:BuildInParallel=$parallelBuildFlag \
-p:ContinuousIntegrationBuild=true \
-p:Deterministic=true \
--configuration "@buildType@" \
--no-restore \
${versionFlags[@]} \
${runtimeIdFlags[@]} \
${dotnetBuildFlags[@]} \
${dotnetFlags[@]}
}
(( "${#projectFile[@]}" == 0 )) && dotnetBuild
for project in ${projectFile[@]} ${testProjectFile[@]-}; do
dotnetBuild "$project"
done
runHook postBuild
echo "Finished dotnetBuildHook"
}
if [[ -z "${dontDotnetBuild-}" && -z "${buildPhase-}" ]]; then
buildPhase=dotnetBuildHook
fi


@ -0,0 +1,46 @@
# inherit arguments from derivation
dotnetTestFlags=( ${dotnetTestFlags[@]-} )
dotnetCheckHook() {
echo "Executing dotnetCheckHook"
runHook preCheck
if [ "${disabledTests-}" ]; then
local -r disabledTestsFlag="--filter @disabledTests@"
fi
if [ "${enableParallelBuilding-}" ]; then
local -r maxCpuFlag="$NIX_BUILD_CORES"
else
local -r maxCpuFlag="1"
fi
for project in ${testProjectFile[@]-${projectFile[@]}}; do
runtimeIdFlags=()
if [[ "$project" == *.csproj ]]; then
runtimeIdFlags=("--runtime @runtimeId@")
fi
env "LD_LIBRARY_PATH=@libraryPath@" \
dotnet test "$project" \
-maxcpucount:$maxCpuFlag \
-p:ContinuousIntegrationBuild=true \
-p:Deterministic=true \
--configuration "@buildType@" \
--no-build \
--logger "console;verbosity=normal" \
${disabledTestsFlag-} \
${runtimeIdFlags[@]} \
"${dotnetTestFlags[@]}" \
"${dotnetFlags[@]}"
done
runHook postCheck
echo "Finished dotnetCheckHook"
}
if [[ -z "${dontDotnetCheck-}" && -z "${checkPhase-}" ]]; then
checkPhase=dotnetCheckHook
fi


@ -0,0 +1,82 @@
declare -a projectFile testProjectFile
# Inherit arguments from derivation
dotnetFlags=( ${dotnetFlags[@]-} )
dotnetRestoreFlags=( ${dotnetRestoreFlags[@]-} )
dotnetConfigureHook() {
echo "Executing dotnetConfigureHook"
runHook preConfigure
if [ -z "${enableParallelBuilding-}" ]; then
local -r parallelFlag="--disable-parallel"
fi
dotnetRestore() {
local -r project="${1-}"
env dotnet restore ${project-} \
-p:ContinuousIntegrationBuild=true \
-p:Deterministic=true \
--runtime "@runtimeId@" \
--source "@nugetSource@/lib" \
${parallelFlag-} \
${dotnetRestoreFlags[@]} \
${dotnetFlags[@]}
}
# Generate a NuGet.config file to make sure everything,
# including things like <Sdk /> dependencies, is restored from the proper source
cat <<EOF > "./NuGet.config"
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<packageSources>
<clear />
<add key="nugetSource" value="@nugetSource@/lib" />
</packageSources>
</configuration>
EOF
# Patch paket.dependencies and paket.lock (if found) to use the proper source. This ensures
# paket restore works correctly
# We use + instead of / in sed to avoid problems with slashes
find -name paket.dependencies -exec sed -i 's+source .*+source @nugetSource@/lib+g' {} \;
find -name paket.lock -exec sed -i 's+remote:.*+remote: @nugetSource@/lib+g' {} \;
env dotnet tool restore --add-source "@nugetSource@/lib"
(( "${#projectFile[@]}" == 0 )) && dotnetRestore
for project in ${projectFile[@]} ${testProjectFile[@]-}; do
dotnetRestore "$project"
done
echo "Fixing up native binaries..."
# Find all native binaries and nuget libraries, and fix them up,
# by setting the proper interpreter and rpath to some commonly used libraries
for binary in $(find "$HOME/.nuget/packages/" -type f -executable); do
if patchelf --print-interpreter "$binary" >/dev/null 2>/dev/null; then
echo "Found binary: $binary, fixing it up..."
patchelf --set-interpreter "$(cat "@dynamicLinker@")" "$binary"
            # This makes sure that if the binary requires some specific runtime dependencies, it can find them.
# This fixes dotnet-built binaries like crossgen2
patchelf \
--add-needed libicui18n.so \
--add-needed libicuuc.so \
--add-needed libz.so \
--add-needed libssl.so \
"$binary"
patchelf --set-rpath "@libPath@" "$binary"
fi
done
runHook postConfigure
echo "Finished dotnetConfigureHook"
}
if [[ -z "${dontDotnetConfigure-}" && -z "${configurePhase-}" ]]; then
configurePhase=dotnetConfigureHook
fi


@ -0,0 +1,57 @@
# Inherit arguments from the derivation
declare -a derivationMakeWrapperArgs="( ${makeWrapperArgs-} )"
makeWrapperArgs=( "${derivationMakeWrapperArgs[@]}" )
# First argument is the executable you want to wrap,
# the second is the destination for the wrapper.
wrapDotnetProgram() {
local dotnetRootFlags=()
if [ ! "${selfContainedBuild-}" ]; then
if [ "${useDotnetFromEnv-}" ]; then
# if dotnet CLI is available, set DOTNET_ROOT based on it. Otherwise set to default .NET runtime
dotnetRootFlags+=("--run" 'command -v dotnet &>/dev/null && export DOTNET_ROOT="$(@dirname@ "$(@realpath@ "$(@which@ dotnet)")")" || export DOTNET_ROOT="@dotnetRuntime@"')
dotnetRootFlags+=("--suffix" "PATH" ":" "@dotnetRuntime@/bin")
else
dotnetRootFlags+=("--set" "DOTNET_ROOT" "@dotnetRuntime@")
dotnetRootFlags+=("--prefix" "PATH" ":" "@dotnetRuntime@/bin")
fi
fi
makeWrapper "$1" "$2" \
--suffix "LD_LIBRARY_PATH" : "@runtimeDeps@" \
"${dotnetRootFlags[@]}" \
"${gappsWrapperArgs[@]}" \
"${makeWrapperArgs[@]}"
echo "installed wrapper to "$2""
}
dotnetFixupHook() {
echo "Executing dotnetFixupPhase"
# check if executables is declared (including empty values, in which case we generate no executables)
if declare -p executables &>/dev/null; then
for executable in ${executables[@]}; do
path="${installPath-$out/lib/$pname}/$executable"
if test -x "$path"; then
wrapDotnetProgram "$path" "$out/bin/$(basename "$executable")"
else
echo "Specified binary \"$executable\" is either not an executable or does not exist!"
echo "Looked in $path"
exit 1
fi
done
else
while IFS= read -d '' executable; do
wrapDotnetProgram "$executable" "$out/bin/$(basename "$executable")" \;
done < <(find "${installPath-$out/lib/$pname}" ! -name "*.dll" -executable -type f -print0)
fi
echo "Finished dotnetFixupPhase"
}
if [[ -z "${dontDotnetFixup-}" ]]; then
preFixupPhases+=" dotnetFixupHook"
fi

View file

@ -0,0 +1,79 @@
# inherit arguments from derivation
dotnetInstallFlags=( ${dotnetInstallFlags[@]-} )
dotnetInstallHook() {
echo "Executing dotnetInstallHook"
runHook preInstall
if [ "${selfContainedBuild-}" ]; then
dotnetInstallFlags+=("--self-contained")
else
dotnetInstallFlags+=("--no-self-contained")
# https://learn.microsoft.com/en-us/dotnet/core/deploying/trimming/trim-self-contained
# Trimming is only available for self-contained build, so force disable it here
dotnetInstallFlags+=("-p:PublishTrimmed=false")
fi
if [ "${useAppHost-}" ]; then
dotnetInstallFlags+=("-p:UseAppHost=true")
fi
dotnetPublish() {
local -r project="${1-}"
runtimeIdFlags=()
if [[ "$project" == *.csproj ]] || [ "${selfContainedBuild-}" ]; then
runtimeIdFlags+=("--runtime @runtimeId@")
fi
env dotnet publish ${project-} \
-p:ContinuousIntegrationBuild=true \
-p:Deterministic=true \
--output "${installPath-$out/lib/$pname}" \
--configuration "@buildType@" \
--no-build \
${runtimeIdFlags[@]} \
${dotnetInstallFlags[@]} \
${dotnetFlags[@]}
}
dotnetPack() {
local -r project="${1-}"
env dotnet pack ${project-} \
-p:ContinuousIntegrationBuild=true \
-p:Deterministic=true \
--output "$out/share" \
--configuration "@buildType@" \
--no-build \
--runtime "@runtimeId@" \
${dotnetPackFlags[@]} \
${dotnetFlags[@]}
}
if (( "${#projectFile[@]}" == 0 )); then
dotnetPublish
else
for project in ${projectFile[@]}; do
dotnetPublish "$project"
done
fi
if [[ "${packNupkg-}" ]]; then
if (( "${#projectFile[@]}" == 0 )); then
dotnetPack
else
for project in ${projectFile[@]}; do
dotnetPack "$project"
done
fi
fi
runHook postInstall
echo "Finished dotnetInstallHook"
}
if [[ -z "${dontDotnetInstall-}" && -z "${installPhase-}" ]]; then
installPhase=dotnetInstallHook
fi
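# Usage sketch (hypothetical derivation attributes, assuming these hooks are
# wired up by a dotnet builder):
#   dotnetInstallFlags = [ "-p:PublishReadyToRun=false" ];
#   packNupkg = true;  # additionally `dotnet pack` each project into $out/share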


@ -0,0 +1,116 @@
{ stdenv, lib, makeWrapper, pkg-config, mono, dotnetbuildhelpers }:
attrsOrig @
{ pname
, version
, nativeBuildInputs ? []
, xBuildFiles ? [ ]
, xBuildFlags ? [ "/p:Configuration=Release" ]
, outputFiles ? [ "bin/Release/*" ]
, dllFiles ? [ "*.dll" ]
, exeFiles ? [ "*.exe" ]
# Additional arguments to pass to the makeWrapper function, which wraps
# generated binaries.
, makeWrapperArgs ? [ ]
, ... }:
let
arrayToShell = (a: toString (map (lib.escape (lib.stringToCharacters "\\ ';$`()|<>\t") ) a));
attrs = {
inherit pname version;
nativeBuildInputs = [
pkg-config
makeWrapper
dotnetbuildhelpers
mono
] ++ nativeBuildInputs;
configurePhase = ''
runHook preConfigure
[ -z "''${dontPlacateNuget-}" ] && placate-nuget.sh
[ -z "''${dontPlacatePaket-}" ] && placate-paket.sh
[ -z "''${dontPatchFSharpTargets-}" ] && patch-fsharp-targets.sh
runHook postConfigure
'';
buildPhase = ''
runHook preBuild
echo "Building .NET packages..."
# Probably needs to be moved to fsharp
if pkg-config FSharp.Core
then
export FSharpTargetsPath="$(dirname $(pkg-config FSharp.Core --variable=Libraries))/Microsoft.FSharp.Targets"
fi
ran=""
for xBuildFile in ${arrayToShell xBuildFiles} ''${xBuildFilesExtra}
do
ran="yes"
xbuild ${arrayToShell xBuildFlags} ''${xBuildFlagsArray} $xBuildFile
done
[ -z "$ran" ] && xbuild ${arrayToShell xBuildFlags} ''${xBuildFlagsArray}
runHook postBuild
'';
dontStrip = true;
installPhase = ''
runHook preInstall
target="$out/lib/dotnet/${pname}"
mkdir -p "$target"
cp -rv ${arrayToShell outputFiles} "''${outputFilesArray[@]}" "$target"
if [ -z "''${dontRemoveDuplicatedDlls-}" ]
then
pushd "$out"
remove-duplicated-dlls.sh
popd
fi
set -f
for dllPattern in ${arrayToShell dllFiles} ''${dllFilesArray[@]}
do
set +f
for dll in "$target"/$dllPattern
do
[ -f "$dll" ] || continue
if pkg-config "$(basename -s .dll "$dll")"
then
echo "$dll already exported by a buildInputs, not re-exporting"
else
create-pkg-config-for-dll.sh "$out/lib/pkgconfig" "$dll"
fi
done
done
set -f
for exePattern in ${arrayToShell exeFiles} ''${exeFilesArray[@]}
do
set +f
for exe in "$target"/$exePattern
do
[ -f "$exe" ] || continue
mkdir -p "$out"/bin
commandName="$(basename -s .exe "$(echo "$exe" | tr "[A-Z]" "[a-z]")")"
makeWrapper \
"${mono}/bin/mono" \
"$out"/bin/"$commandName" \
--add-flags "\"$exe\"" \
''${makeWrapperArgs}
done
done
runHook postInstall
'';
};
in
stdenv.mkDerivation (attrs // (builtins.removeAttrs attrsOrig [ "nativeBuildInputs" ] ))
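# Usage sketch (hypothetical package; assumes this file is exposed via
# callPackage as `buildDotnetPackage`):
#
#   buildDotnetPackage rec {
#     pname = "mytool";
#     version = "1.0";
#     src = fetchurl { /* ... */ };
#     xBuildFiles = [ "MyTool.sln" ];
#     outputFiles = [ "bin/Release/*" ];
#   }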


@ -0,0 +1,23 @@
#!/usr/bin/env bash
targetDir="$1"
dllFullPath="$2"
dllVersion="$(monodis --assembly "$dllFullPath" | grep ^Version: | cut -f 2 -d : | xargs)"
[ -z "$dllVersion" ] && echo "Defaulting dllVersion to 0.0.0" && dllVersion="0.0.0"
dllFileName="$(basename "$dllFullPath")"
dllRootName="$(basename -s .dll "$dllFileName")"
targetPcFile="$targetDir"/"$dllRootName".pc
mkdir -p "$targetDir"
cat > "$targetPcFile" << EOF
Libraries=$dllFullPath
Name: $dllRootName
Description: $dllRootName
Version: $dllVersion
Libs: -r:$dllFileName
EOF
echo "Created $targetPcFile"


@ -0,0 +1,18 @@
{ runCommand, mono, pkg-config }:
runCommand
"dotnetbuildhelpers"
{ preferLocalBuild = true; }
''
target="$out/bin"
mkdir -p "$target"
for script in ${./create-pkg-config-for-dll.sh} ${./patch-fsharp-targets.sh} ${./remove-duplicated-dlls.sh} ${./placate-nuget.sh} ${./placate-paket.sh}
do
scriptName="$(basename "$script" | cut -f 2- -d -)"
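# basename yields "<hash>-<name>.sh"; cutting at the first dash drops the
# Nix store hash, leaving just "<name>.sh".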
cp -v "$script" "$target"/"$scriptName"
chmod 755 "$target"/"$scriptName"
patchShebangs "$target"/"$scriptName"
substituteInPlace "$target"/"$scriptName" --replace pkg-config ${pkg-config}/bin/${pkg-config.targetPrefix}pkg-config
substituteInPlace "$target"/"$scriptName" --replace monodis ${mono}/bin/monodis
done
''


@ -0,0 +1,20 @@
#!/bin/bash
# Some project files look for F# targets in $(FSharpTargetsPath)
# so it's a good idea to add something like this to your ~/.bash_profile:
# export FSharpTargetsPath=$(dirname $(which fsharpc))/../lib/mono/4.0/Microsoft.FSharp.Targets
# In build scripts, you would add something like this:
# export FSharpTargetsPath="${fsharp}/lib/mono/4.0/Microsoft.FSharp.Targets"
# However, some project files look for F# targets in the main Mono directory. When that happens,
# patch the project files using this script so they will look in $(FSharpTargetsPath) instead.
echo "Patching F# targets in fsproj files..."
find -iname \*.fsproj -print -exec \
sed --in-place=.bak \
-e 's,<FSharpTargetsPath>\([^<]*\)</FSharpTargetsPath>,<FSharpTargetsPath Condition="Exists('\'\\1\'')">\1</FSharpTargetsPath>,'g \
{} \;
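# Illustrative rewrite performed by the sed above:
#   <FSharpTargetsPath>/some/path/FSharp.Targets</FSharpTargetsPath>
# becomes
#   <FSharpTargetsPath Condition="Exists('/some/path/FSharp.Targets')">/some/path/FSharp.Targets</FSharpTargetsPath>
# so the hard-coded path only applies when it actually exists, letting a
# $(FSharpTargetsPath) set in the environment take effect otherwise.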


@ -0,0 +1,7 @@
#!/usr/bin/env bash
echo "Placating NuGet in nuget.targets"
find -iname nuget.targets -print -exec sed --in-place=bak -e 's,mono --runtime[^<]*,true NUGET PLACATED BY buildDotnetPackage,g' {} \;
echo "Just to be sure, replacing NuGet executables with empty files."
find . -iname nuget.exe \! -size 0 -exec mv -v {} {}.bak \; -exec touch {} \;
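# Net effect: MSBuild targets that shelled out to NuGet now run
# "true NUGET PLACATED BY buildDotnetPackage" (a no-op), and any bundled
# nuget.exe is emptied so it cannot fetch packages during the build.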
