git-cinnabar-0.7.0/.cargo/config.toml000064400000000000000000000000671046102023000155040ustar 00000000000000[build] rustflags = [ "-Cforce-unwind-tables=yes", ] git-cinnabar-0.7.0/.cargo_vcs_info.json0000644000000001360000000000100133760ustar { "git": { "sha1": "ac9daedfe7657348fe19f7190f911545aae65d15" }, "path_in_vcs": "" }git-cinnabar-0.7.0/Cargo.lock0000644000001077360000000000100113670ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "addr2line" version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] name = "adler2" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "anstream" version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", "once_cell", "windows-sys 0.59.0", ] [[package]] name = "arbitrary" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] [[package]] name = "array-init" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" [[package]] name = "backtrace" version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", "windows-targets", ] [[package]] name = "bit-vec" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bstr" version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0" dependencies = [ "memchr", "serde", ] [[package]] name = "byteorder" version = "1.5.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bzip2" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b89e7c29231c673a61a46e722602bcd138298f6b9e81e71119693534585f5c" dependencies = [ "bzip2-sys", ] [[package]] name = "bzip2-sys" version = "0.1.12+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72ebc2f1a417f01e1da30ef264ee86ae31d2dcd2d603ea283d3c244a883ca2a9" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "cc" version = "1.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" dependencies = [ "jobserver", "libc", "shlex", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", ] [[package]] name = "clap_builder" version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", ] [[package]] name = "clap_derive" version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", "syn", ] [[package]] name = "clap_lex" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = 
"colorchoice" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "concat_const" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "105f2bacffc2751fc3f4f2b69b9c2d6f793d8807f03ee733e89a7baa8c9f8c42" [[package]] name = "cpufeatures" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "cstr" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68523903c8ae5aacfa32a0d9ae60cadeb764e1da14ee0d26b1f3089f13a54636" dependencies = [ "proc-macro2", "quote", ] [[package]] name = "curl-sys" version = "0.4.80+curl-8.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55f7df2eac63200c3ab25bde3b2268ef2ee56af3d238e76d61f01c3c49bff734" dependencies = [ "cc", "libc", "libz-sys", "openssl-sys", "pkg-config", "vcpkg", "windows-sys 0.52.0", ] [[package]] name = "derive_arbitrary" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" 
dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "derive_more" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", "syn", "unicode-xid", ] [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", ] [[package]] name = "displaydoc" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "either" version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", "windows-sys 0.59.0", ] [[package]] name = "fastrand" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "filetime" version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if", "libc", "libredox", "windows-sys 0.59.0", ] [[package]] name = "flate2" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", "libz-sys", "miniz_oxide", ] [[package]] name = "form_urlencoded" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "getset" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3586f256131df87204eb733da72e3d3eb4f343c639f4b7be279ac7c48baeafe" dependencies = [ "proc-macro-error2", "proc-macro2", "quote", "syn", ] [[package]] name = "gimli" version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git-cinnabar" version = "0.7.0" dependencies = [ "array-init", "backtrace", "bit-vec", "bitflags", "bstr", "byteorder", "bzip2", "cc", "cfg-if", "clap", "concat_const", "cstr", "curl-sys", "derive_more", "digest", "either", "flate2", "getset", "git-version", "hex", "hex-literal", "indexmap", "itertools", "libc", "libz-sys", "log", "lru", "make-cmd", "mio", "once_cell", "percent-encoding", "rand", "regex", "semver", 
"sha1", "shared_child", "syn", "tar", "target", "tee", "tempfile", "typenum", "url", "windows-sys 0.59.0", "xz2", "zip", "zstd", ] [[package]] name = "git-version" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" dependencies = [ "git-version-macro", ] [[package]] name = "git-version-macro" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "icu_collections" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" dependencies = [ "displaydoc", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_locid" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" dependencies = [ "displaydoc", "litemap", "tinystr", "writeable", "zerovec", ] [[package]] name = "icu_locid_transform" version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" dependencies = [ "displaydoc", "icu_locid", "icu_locid_transform_data", "icu_provider", "tinystr", "zerovec", ] [[package]] name = "icu_locid_transform_data" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" [[package]] name = "icu_normalizer" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" dependencies = [ "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", "utf16_iter", "utf8_iter", "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" [[package]] name = "icu_properties" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" dependencies = [ "displaydoc", "icu_collections", "icu_locid_transform", "icu_properties_data", "icu_provider", "tinystr", "zerovec", ] [[package]] name = "icu_properties_data" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" [[package]] name = "icu_provider" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" dependencies = [ "displaydoc", "icu_locid", "icu_provider_macros", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", "zerovec", ] [[package]] name = "icu_provider_macros" version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "idna" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ "idna_adapter", "smallvec", "utf8_iter", ] [[package]] name = "idna_adapter" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ "icu_normalizer", "icu_properties", ] [[package]] name = "indexmap" version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "jobserver" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "libc" version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libredox" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags", "libc", "redox_syscall", ] 
[[package]] name = "libz-sys" version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "libc", "pkg-config", "vcpkg", ] [[package]] name = "linux-raw-sys" version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "log" version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "lru" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" [[package]] name = "lzma-sys" version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" dependencies = [ "cc", "libc", "pkg-config", ] [[package]] name = "make-cmd" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8ca8afbe8af1785e09636acb5a41e08a765f5f0340568716c18a8700ba3c0d3" [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miniz_oxide" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", ] [[package]] name = "mio" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", "wasi", "windows-sys 0.52.0", ] [[package]] name = "object" version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" [[package]] name = "openssl-sys" version = "0.9.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", "pkg-config", "vcpkg", ] [[package]] name = "percent-encoding" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pkg-config" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "ppv-lite86" version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ "zerocopy", ] [[package]] name = "proc-macro-error-attr2" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ "proc-macro2", "quote", ] [[package]] name = "proc-macro-error2" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", "syn", ] [[package]] name = "proc-macro2" version = "1.0.93" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustix" version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.59.0", ] [[package]] name = "semver" version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "serde" version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "shared_child" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09fa9338aed9a1df411814a5b2252f7cd206c55ae9bf2fa763f8de84603aa60c" dependencies = [ "libc", "windows-sys 0.59.0", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "smallvec" version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "synstructure" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tar" version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" dependencies = [ "filetime", "libc", "xattr", ] [[package]] name = "target" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8f05f774b2db35bdad5a8237a90be1102669f8ea013fea9777b366d34ab145" [[package]] name = "tee" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37c12559dba7383625faaff75be24becf35bfc885044375bcab931111799a3da" [[package]] name = "tempfile" version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", "getrandom", "once_cell", "rustix", "windows-sys 0.59.0", ] [[package]] name = "thiserror" version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tinystr" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" dependencies = [ "displaydoc", "zerovec", ] [[package]] name = "typenum" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "unicode-ident" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" [[package]] name = "unicode-xid" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "url" version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "utf16_iter" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" [[package]] name = "utf8_iter" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" 
[[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "write16" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" [[package]] name = "writeable" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "xattr" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" dependencies = [ "libc", "linux-raw-sys", "rustix", ] [[package]] name = "xz2" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" dependencies = [ "lzma-sys", ] [[package]] name = "yoke" version = "0.7.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", "yoke-derive", "zerofrom", ] [[package]] name = "yoke-derive" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "zerocopy" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zerofrom" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", "syn", "synstructure", ] [[package]] name = "zerovec" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", "zerovec-derive", ] [[package]] name = "zerovec-derive" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "zip" version = "2.2.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b280484c454e74e5fff658bbf7df8fdbe7a07c6b2de4a53def232c15ef138f3a" dependencies = [ "arbitrary", "crc32fast", "crossbeam-utils", "displaydoc", "flate2", "indexmap", "memchr", "thiserror", ] [[package]] name = "zstd" version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "7.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", ] git-cinnabar-0.7.0/Cargo.toml0000644000000111430000000000100113740ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.75.0" name = "git-cinnabar" version = "0.7.0" authors = ["Mike Hommey "] build = "build.rs" include = [ "/src", "/MPL-2.0", "/build.rs", "/.cargo", "/git-core/COPYING", "/git-core/LGPL-2.1", "/git-core/Makefile", "/git-core/detect-compiler", "/git-core/GIT-VERSION-GEN", "/git-core/GIT-VERSION-FILE.in", "/git-core/version-def.h.in", "/git-core/*.mak*", "/git-core/**/*.c", "/git-core/**/*.h", "!/git-core/t/**", ] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "git remote helper to interact with mercurial repositories" readme = "README.md" license = "MPL-2.0 AND GPL-2.0" repository = "https://github.com/glandium/git-cinnabar" [features] compile_commands = [] default = [ "full-version", "version-check", ] full-version = [ "dep:concat_const", "dep:git-version", ] gitdev = [] self-update = [ "shared_child", "dep:concat_const", "dep:tar", "dep:xz2", "dep:zip", "windows-sys/Win32_System_Threading", ] version-check = ["shared_child"] [[bin]] name = "git-cinnabar" path = "src/main.rs" [dependencies.array-init] version = "2.0.1" [dependencies.backtrace] version = "0.3" [dependencies.bit-vec] version = "0.8" [dependencies.bitflags] version = "2" [dependencies.bstr] version = "1" features = ["std"] default-features = false [dependencies.byteorder] version = "1" [dependencies.bzip2] version = "0.5" [dependencies.cfg-if] version = "1" [dependencies.clap] version = "4.2" features = [ "cargo", "derive", ] [dependencies.concat_const] version = "0.1" optional = true [dependencies.cstr] version = "0.2.10" [dependencies.derive_more] version = "1" features = [ "deref", "display", "debug", "from", "try_into", ] default-features = false [dependencies.digest] version = "0.10" [dependencies.either] version = "1" [dependencies.flate2] version = "1" features = ["zlib"] default-features = false [dependencies.getset] version = "0.1" [dependencies.git-version] version = "0.3" optional = true 
[dependencies.hex] version = "0.4" [dependencies.hex-literal] version = "0.4" [dependencies.indexmap] version = "2" [dependencies.itertools] version = "0.14" [dependencies.libc] version = "0.2" [dependencies.log] version = "0.4" features = ["std"] [dependencies.lru] version = "0.12" default-features = false [dependencies.mio] version = "1" features = [ "os-ext", "os-poll", ] [dependencies.once_cell] version = "1.13" [dependencies.percent-encoding] version = "2" [dependencies.rand] version = "0.8" [dependencies.regex] version = "1" features = ["std"] default-features = false [dependencies.semver] version = "1.0" [dependencies.sha1] version = "0.10" [dependencies.shared_child] version = "1.0" optional = true [dependencies.tee] version = "0.1" [dependencies.tempfile] version = "3" [dependencies.typenum] version = "1" [dependencies.url] version = "2" [dependencies.zstd] version = "0.13" default-features = false [dev-dependencies.tempfile] version = "3" [build-dependencies.cc] version = "1.0.46" [build-dependencies.itertools] version = "0.14" [build-dependencies.make-cmd] version = "0.1" [build-dependencies.syn] version = "2" features = ["full"] [build-dependencies.target] version = "2.0" [target."cfg(not(windows))".dependencies.curl-sys] version = "0.4" default-features = false [target."cfg(not(windows))".dependencies.libz-sys] version = "1" [target."cfg(not(windows))".dependencies.tar] version = "0.4" optional = true [target."cfg(not(windows))".dependencies.xz2] version = "0.1" optional = true [target."cfg(windows)".dependencies.curl-sys] version = "0.4" features = [ "ssl", "static-curl", ] default-features = false [target."cfg(windows)".dependencies.libz-sys] version = "1" features = ["static"] [target."cfg(windows)".dependencies.windows-sys] version = "0.59" features = ["Win32_Foundation"] [target."cfg(windows)".dependencies.zip] version = "2" features = ["deflate-zlib"] optional = true default-features = false [profile.dev] panic = "abort" [profile.release] 
codegen-units = 1 panic = "abort" git-cinnabar-0.7.0/Cargo.toml.orig000064400000000000000000000071321046102023000150600ustar 00000000000000# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. [package] name = "git-cinnabar" version = "0.7.0" description = "git remote helper to interact with mercurial repositories" authors = ["Mike Hommey "] edition = "2021" license = "MPL-2.0 AND GPL-2.0" repository = "https://github.com/glandium/git-cinnabar" rust-version = "1.75.0" include = [ "/src", "/MPL-2.0", "/build.rs", "/.cargo", "/git-core/COPYING", "/git-core/LGPL-2.1", "/git-core/Makefile", "/git-core/detect-compiler", "/git-core/GIT-VERSION-GEN", "/git-core/GIT-VERSION-FILE.in", "/git-core/version-def.h.in", "/git-core/*.mak*", "/git-core/**/*.c", "/git-core/**/*.h", "!/git-core/t/**", ] [dependencies] array-init = "2.0.1" backtrace = "0.3" bit-vec = "0.8" bitflags = "2" bzip2 = "0.5" byteorder = "1" cfg-if = "1" cstr = "0.2.10" digest = "0.10" either = "1" getset = "0.1" hex = "0.4" hex-literal = "0.4" indexmap = "2" itertools = "0.14" libc = "0.2" once_cell = "1.13" percent-encoding = "2" rand = "0.8" semver = "1.0" sha1 = "0.10" tee = "0.1" tempfile = "3" typenum = "1" url = "2" [dependencies.bstr] version = "1" default-features = false features = ["std"] [dependencies.clap] version = "4.2" features = ["cargo", "derive"] [dependencies.concat_const] version = "0.1" optional = true [target.'cfg(windows)'.dependencies.curl-sys] version = "0.4" default-features = false features = ["ssl", "static-curl"] [target.'cfg(not(windows))'.dependencies.curl-sys] version = "0.4" default-features = false [dependencies.derive_more] version = "1" default-features = false features = ["deref", "display", "debug", "from", "try_into"] [dependencies.flate2] version = "1" default-features = false features = ["zlib"] [dependencies.git-version] 
version = "0.3" optional = true [target.'cfg(windows)'.dependencies.libz-sys] version = "1" features = ["static"] [target.'cfg(not(windows))'.dependencies.libz-sys] version = "1" [dependencies.log] version = "0.4" features = ["std"] [dependencies.lru] version = "0.12" default-features = false [dependencies.mio] version = "1" features = ["os-ext", "os-poll"] [dependencies.regex] version = "1" default-features = false features = ["std"] [dependencies.shared_child] version = "1.0" optional = true [target.'cfg(not(windows))'.dependencies.tar] version = "0.4" optional = true [target.'cfg(windows)'.dependencies.windows-sys] version = "0.59" features = ["Win32_Foundation"] [target.'cfg(not(windows))'.dependencies.xz2] version = "0.1" optional = true [target.'cfg(windows)'.dependencies.zip] version = "2" default-features = false features = ["deflate-zlib"] optional = true [dependencies.zstd] version = "0.13" default-features = false [build-dependencies] cc = "1.0.46" itertools = "0.14" make-cmd = "0.1" target = "2.0" syn = { version = "2", features = ["full"] } # git-version fails to parse inner macros without this. [dev-dependencies] tempfile = "3" [profile.release] codegen-units = 1 panic = "abort" [profile.dev] panic = "abort" [features] default = ["full-version", "version-check"] full-version = ["dep:concat_const", "dep:git-version"] # Check and report when a new version is available. version-check = ["shared_child"] # Download and apply new versions. self-update = ["shared_child", "dep:concat_const", "dep:tar", "dep:xz2", "dep:zip", "windows-sys/Win32_System_Threading"] # Development features # Create compile_commands.json for IDE integration. compile_commands = [] # Enable libgit development options. gitdev = [] git-cinnabar-0.7.0/MPL-2.0000064400000000000000000000405251046102023000130440ustar 00000000000000Mozilla Public License Version 2.0 ================================== 1. Definitions -------------- 1.1. 
"Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. 
"Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions -------------------------------- 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. 
Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities ------------------- 3.1. 
Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. 
Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination -------------- 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. 
However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. ************************************************************************ * * * 6. Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. 
* * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. * * * ************************************************************************ ************************************************************************ * * * 7. Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. * * * ************************************************************************ 8. Litigation ------------- Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. 
Miscellaneous ---------------- This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License --------------------------- 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice ------------------------------------------- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice --------------------------------------------------------- This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. git-cinnabar-0.7.0/README.md000064400000000000000000000262151046102023000134530ustar 00000000000000git-cinnabar 0.7 ================ *cinnabar is the common natural form in which mercury can be found on Earth. It contains mercury sulfide and its powder is used to make the vermillion pigment.* git-cinnabar is a git remote helper to interact with mercurial repositories. Contrary to other such helpers ([[1]](https://github.com/felipec/git-remote-hg) [[2]](https://github.com/rfk/git-remote-hg) [[3]](https://github.com/cosmin/git-hg) [[4]](https://github.com/todesschaf/git-hg) [[5]](https://github.com/msysgit/msysgit/wiki/Guide-to-git-remote-hg) [[6]](https://github.com/buchuki/gitifyhg/)), it doesn't use a local mercurial clone under the hood. The main focus at the moment is to make it work with mozilla-central and related mercurial repositories and support Mozilla workflows (try server, etc.). Repositories last used with versions lower than 0.5.0 are not supported. Please run `git cinnabar upgrade` with version 0.5.0 first. License: -------- The git-cinnabar source code is distributed under the terms of the Mozilla Public License version 2.0 (see the MPL-2.0 file), with parts (the git-core subdirectory) distributed under the terms of the GNU General Public License version 2.0 (see the git-core/COPYING file). As a consequence, git-cinnabar binary executables are distributed under the terms of the GNU General Public License version 2.0. 
Requirements: ------------- - Git (any version should work ; cinnabarclone bundles require 1.4.4). - In order to build from source: - Rust 1.75.0 or newer. - A C compiler (GCC or clang). - make. - CURL development headers and libraries (except on Windows). Please note that on MacOS they are included in the SDK. Setup: ------ ### Prebuilt binaries - Assuming a prebuilt binary is available for your system, get the [download.py script](https://raw.githubusercontent.com/glandium/git-cinnabar/master/download.py) and run it (requires python 3.9 or newer) with: ``` $ ./download.py ``` - Add the directory where the download happened to your PATH. If you have another git-remote-hg project in your PATH already, make sure the git-cinnabar path comes before. ### Cargo - Run the following: ``` $ cargo install --locked git-cinnabar $ git cinnabar setup ``` ### Build manually - Run the following: ``` $ git clone https://github.com/glandium/git-cinnabar $ cd git-cinnabar $ make ``` - Add the git-cinnabar directory to your PATH. Usage: ------ `$ git clone hg::` where `` can be a path to a local directory containing a mercurial repository, or a http, https or ssh url. Essentially, use git like you would for a git repository, but use a `hg::` url where you would use a `git://` url. See https://github.com/glandium/git-cinnabar/wiki/Mozilla:-A-git-workflow-for-Gecko-development for an example workflow for Mozilla repositories. Remote refs styles: ------------------- Mercurial has two different ways to handle what git would call branches: branches and bookmarks. Mercurial branches are permanent markers on each changeset that belongs to them, and bookmarks are similar to git branches. You may choose how to interact with those with the `cinnabar.refs` configuration. The following values are supported, either individually or combined in a comma-separated list: - `bookmarks`: in this mode, the mercurial repository's bookmarks are exposed as `refs/heads/$bookmark`. 
Practically speaking, this means the mercurial bookmarks appear as the remote git branches. - `tips`: in this mode, the most recent head of each mercurial branch is exposed as `refs/heads/$branch`. Any other head of the same branch is not exposed. This mode can be useful when branches have no more than one head. - `heads`: in this mode, the mercurial repository's heads are exposed as `refs/heads/$branch/$head`, where `$branch` is the mercurial branch name and `$head` is the full changeset sha1 of that head. When these values are used in combinations, the branch mappings are varied accordingly to make the type of each remote ref explicit and to avoid name collisions. - When combining `bookmarks` and `heads`, bookmarks are exposed as `refs/heads/bookmarks/$bookmark` and branch heads are exposed as `refs/heads/branches/$branch/$head` (where `$head` is the full changeset sha1 of the head). - When combining `bookmarks` and `tips`, bookmarks are exposed as `refs/heads/bookmarks/$bookmark` and branch tips are exposed as `refs/heads/branches/$branch`. Any other heads of the same branch are not exposed. - When combining all of `bookmarks`, `heads`, and `tips`, bookmarks are exposed as `refs/heads/bookmarks/$bookmark`, branch heads are exposed as `refs/heads/branches/$branch/$head` (where `$head` is the full changeset sha1 of the head), except for the branch tips, which are exposed as `refs/heads/branches/$branch/tip`. The shorthand `all` (also the default), is the combination of `bookmarks`, `heads`, and `tips`. The refs style can also be configured per remote with the `remote.$remote.cinnabar-refs` configuration. It is also possible to use `cinnabar.pushrefs` or `remote.$remote.cinnabar-pushrefs` to use a different scheme for pushes only. 
Tags: ----- You can get/update tags with the following command: `$ git cinnabar fetch --tags` Fetching a specific mercurial changeset: ---------------------------------------- It can sometimes be useful to fetch a specific mercurial changeset from a remote server, without fetching the entire repository. This can be done with a command line such as: `$ git cinnabar fetch hg:: ` Translating git commits to mercurial changesets and vice-versa: --------------------------------------------------------------- When dealing with a remote repository that doesn't use the same identifiers, things can easily get complicated. Git-cinnabar comes with commands to know the mercurial changeset a git commit represents and the other way around. The following command will give you the git commit corresponding to the given mercurial changeset sha1: `$ git cinnabar hg2git ` The following command will give you the mercurial changeset corresponding to the given git commit sha1: `$ git cinnabar git2hg ` Both commands allow abbreviated forms, as long as they are unambiguous (no need for all the 40 hex digits of the sha1). Avoiding metadata: ------------------ In some cases, it is not desirable to have git-cinnabar create metadata for all pushed commits. Notably, for volatile commits such as those used on the Mozilla try repository. By default, git-cinnabar doesn't store metadata when pushing to non-publishing repositories. It does otherwise. This behavior can be changed per-remote with a `remote.$remote.cinnabar-data` preference with one of the following values: - `always` - `never` - `phase` - `force` `phase` is the default described above. `always` and `never` are self-explanatory. `force` has the same meaning as `always`, but also forces `git push --dry-run` to store metadata. Cinnabar clone: --------------- For large repositories, an initial clone can take a large amount of time. 
A Mercurial server operator can install the extension provided in `mercurial/cinnabarclone.py`, and point to a git repository or bundle containing pre-generated git-cinnabar metadata. See details in the extension file. Users cloning the repository would automatically get the metadata from the git repository or bundle, and then pull the missing changesets from the Mercurial repository. Limitations: ------------ At the moment, push is limited to non-merge commits. There is no support for the following mercurial features: - obsolescence markers - phases - namespaces Checking corruptions: --------------------- Git-cinnabar is still in early infancy, and its metadata might get corrupted for some reason. The following command allows to detect various types of metadata corruption: `git cinnabar fsck` This command will fix the corruptions it can, as well as adjust some of the metadata that contains items that became unnecessary in newer versions. The `--full` option may be added for a more thorough validation of the metadata contents. Using this option adds a significant amount of work, and the command can take more than half an hour on repositories the size of mozilla-central. `hg://` urls: ----------- The msys shell (not msys2) doesn't keep hg::url intact when crossing the msys/native boundary, so when running cinnabar in a msys shell with a native git, the url is munged as `hg;;proto;\host\path\`, which git doesn't understand and doesn't even start redirecting to git-remote-hg. To allow such setups to still work, `hg://` urls are supported. But since mercurial can be either on many different protocols, we abuse the port in the given url to pass the protocol. A `hg://` url thus looks like: `hg://[:[.]]/` The default protocol is https, and the port can be omitted. 
- `hg::https://hg.mozilla.org/mozilla-central` becomes `hg://hg.mozilla.org/mozilla-central` - `hg::http://hg.mozilla.org/mozilla-central` becomes `hg://hg.mozilla.org:http/mozilla-central` - `hg::ssh://hg.mozilla.org/mozilla-central` becomes `hg://hg.mozilla.org:ssh/mozilla-central` - `hg::file:///some/path` becomes (awkward) `hg://:file/some/path` - `hg::http://localhost:8080/foo` becomes `hg://localhost:8080.http/foo` - `hg::tags:` becomes `hg://:tags` Compatibility: -------------- As of version 0.7, some corner cases in Mercurial repositories will generate different git commits than with prior versions of git-cinnabar. This means a fresh clone might have different git SHA-1s than existing clones, but this doesn't impact the use of existing clones with newer versions of git-cinnabar. Most repositories should remain non-affected by the change. You can set the `cinnabar.compat` git configuration to `0.6` to keep the previous behavior. Experimental features: ---------------------- Git-cinnabar has a set of experimental features that can be enabled independently. You can set the `cinnabar.experiments` git configuration to a comma-separated list of those features to enable the selected ones. The available features are: - **merge** Git-cinnabar currently doesn’t allow to push merge commits. The main reason for this is that generating the correct mercurial data for those merges is tricky, and needs to be gotten right. The main caveat with this experimental support for pushing merges is that it currently doesn’t handle the case where a file was moved on one of the branches the same way mercurial would (i.e. the information would be lost to mercurial users). - **similarity** Git doesn't track file copies or renames. It however has flags to try to detect them after the fact. On the other hand, Mercurial does track copies and renames, if they're recorded manually in the first place. 
Git-cinnabar does exact-copy/rename detection when pushing new commits to a Mercurial repository. The similarity feature allows to configure how (dis)similar files can be to be detected as a rename or copy. `similarity=100` is the default, which means only 100% identical files are considered. `similarity=90` means 90% identical files, and so on. This is equivalent to `git diff -C -C${similarity}%` git-cinnabar-0.7.0/build.rs000064400000000000000000000213431046102023000136360ustar 00000000000000/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #![allow(renamed_and_removed_lints)] #![deny(clippy::cloned_instead_of_copied)] #![deny(clippy::default_trait_access)] #![deny(clippy::flat_map_option)] #![deny(clippy::from_iter_instead_of_collect)] #![deny(clippy::implicit_clone)] #![deny(clippy::inconsistent_struct_constructor)] #![deny(clippy::large_types_passed_by_value)] #![deny(clippy::let_underscore_drop)] #![deny(clippy::let_unit_value)] #![deny(clippy::manual_ok_or)] #![deny(clippy::map_flatten)] #![deny(clippy::map_unwrap_or)] #![deny(clippy::needless_bitwise_bool)] #![deny(clippy::needless_continue)] #![deny(clippy::needless_for_each)] #![deny(clippy::option_option)] #![deny(clippy::range_minus_one)] #![deny(clippy::range_plus_one)] #![deny(clippy::redundant_closure_for_method_calls)] #![deny(clippy::redundant_else)] #![deny(clippy::ref_binding_to_reference)] #![deny(clippy::ref_option_ref)] #![deny(clippy::semicolon_if_nothing_returned)] #![deny(clippy::trait_duplication_in_bounds)] #![deny(clippy::transmute_ptr_to_ptr)] #![deny(clippy::type_repetition_in_bounds)] #![deny(clippy::unicode_not_nfc)] #![deny(clippy::unnecessary_wraps)] #![deny(clippy::unnested_or_patterns)] #![deny(clippy::unused_self)] use std::ffi::OsString; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; use 
itertools::Itertools; use make_cmd::gnu_make; #[cfg(not(windows))] fn normalize_path(s: &str) -> &str { s } #[cfg(windows)] fn normalize_path(s: &str) -> String { s.replace('\\', "/") } fn env(name: &str) -> String { std::env::var(name).unwrap_or_else(|_| panic!("Failed to get {}", name)) } fn env_os(name: &str) -> OsString { std::env::var_os(name).unwrap_or_else(|| panic!("Failed to get {}", name)) } fn prepare_make(make: &mut Command) -> &mut Command { let mut build_mk = PathBuf::from(env_os("CARGO_MANIFEST_DIR")); build_mk.push("src"); build_mk.push("build.mk"); let mut result = make.arg("-f").arg(&build_mk); for chunk in &std::env::var("CINNABAR_MAKE_FLAGS") .unwrap_or_else(|_| "".into()) .split('\'') .chunks(2) { let chunk: Vec<_> = chunk.collect(); if chunk.len() == 2 { let name = chunk[0].trim_start().trim_end_matches('='); let value = chunk[1]; result = result.arg(format!("{}={}", name, value)); } } result.env_remove("PROFILE") } fn main() { let target_arch = env("CARGO_CFG_TARGET_ARCH"); let target_os = env("CARGO_CFG_TARGET_OS"); let target_env = env("CARGO_CFG_TARGET_ENV"); let target_endian = env("CARGO_CFG_TARGET_ENDIAN"); let target_pointer_width = env("CARGO_CFG_TARGET_POINTER_WIDTH"); if target_os == "windows" && target_env != "gnu" { panic!( "Compilation for {}-{} is not supported", target_os, target_env ); } let dir = env_os("CARGO_MANIFEST_DIR"); let dir = Path::new(&dir); let extra_args = if target_os == "linux" { &["uname_S=Linux"][..] } else if target_os == "macos" { &["uname_S=Darwin", "uname_R=15.0"][..] } else if target_os == "windows" { static EXTRA_ARGS: [&str; 3] = ["MINGW_WRAPPERS=1", "uname_S=MINGW", "MSYSTEM=MINGW64"]; // If the filesystem is case insensitive, we don't want to use MINGW_WRAPPERS. if dir.join("src").join("mingw").join("psapi.h").exists() { &EXTRA_ARGS[1..] } else { &EXTRA_ARGS[..] 
} } else if std::env::var("CINNABAR_CROSS_COMPILE_I_KNOW_WHAT_I_M_DOING").is_err() && (target_arch != target::arch() || target_os != target::os() || target_env != target::env() || target_endian != target::endian() || target_pointer_width != target::pointer_width()) { panic!("Cross-compilation is not supported"); } else { &[][..] }; let out_dir = PathBuf::from(env_os("OUT_DIR")); let mut make = gnu_make(); let cmd = prepare_make(&mut make); cmd.arg("libcinnabar.a") .arg("V=1") .arg("HAVE_WPGMPTR=") .arg("LAZYLOAD_LIBCURL=") .arg("USE_LIBPCRE1=") .arg("USE_LIBPCRE2=") .arg("NO_REGEX=1") .arg("NO_ICONV=1") .arg("USE_MIMALLOC=") .arg("FSMONITOR_DAEMON_BACKEND=") .arg("GENERATED_H=") .args(extra_args); let mut build = cc::Build::new(); build.force_frame_pointer(true).warnings(false); let compiler = build.get_compiler(); let ar = build.get_archiver(); let cflags = [ compiler.cflags_env().into_string().ok(), // cc-rs ignores TARGET_CFLAGS when TARGET == HOST if env("TARGET") == env("HOST") { std::env::var("TARGET_CFLAGS").ok() } else { None }, std::env::var("DEP_CURL_INCLUDE") .map(|i| format!("-I{}", normalize_path(&i))) .ok(), std::env::var("DEP_CURL_STATIC") .map(|_| "-DCURL_STATICLIB".to_string()) .ok(), std::env::var("DEP_Z_INCLUDE") .map(|i| format!("-I{}", normalize_path(&i))) .ok(), ] .iter() .filter_map(Option::as_deref) .chain( match &*target_os { "windows" => &[ "-Dpthread_create=win32_pthread_create", "-Dpthread_self=win32_pthread_self", "-D_POSIX_THREAD_SAFE_FUNCTIONS=200112L", ][..], _ => &[][..], } .iter() .copied(), ) .join(" "); cmd.arg(format!("CFLAGS={}", cflags)); cmd.arg(format!("CC={}", compiler.path().display())); cmd.arg(format!("AR={}", ar.get_program().to_str().unwrap())); let compile_commands = cfg!(feature = "compile_commands") || std::env::var("VSCODE_PID").is_ok(); if compile_commands { cmd.arg("GENERATE_COMPILATION_DATABASE=yes"); cmd.arg("compile_commands.json"); } if cfg!(feature = "gitdev") || std::env::var("PROFILE").as_deref() == 
Ok("debug") { cmd.arg("DEVELOPER=1"); } cmd.arg("COMPUTE_HEADER_DEPENDENCIES=yes"); println!("cargo:rerun-if-env-changed=CFLAGS_{}", env("TARGET")); println!( "cargo:rerun-if-env-changed=CFLAGS_{}", env("TARGET").replace('-', "_") ); println!("cargo:rerun-if-env-changed=CFLAGS"); println!("cargo:rerun-if-env-changed=TARGET_CFLAGS"); println!("cargo:rerun-if-env-changed=DEP_CURL_INCLUDE"); println!("cargo:rerun-if-env-changed=DEP_CURL_STATIC"); println!("cargo:rerun-if-env-changed=DEP_Z_INCLUDE"); println!("cargo:rerun-if-env-changed=CC_{}", env("TARGET")); println!( "cargo:rerun-if-env-changed=CC_{}", env("TARGET").replace('-', "_") ); println!("cargo:rerun-if-env-changed=CC"); println!("cargo:rerun-if-env-changed=CRATE_CC_NO_DEFAULTS"); assert!(cmd .env("MAKEFLAGS", format!("-j {}", env("CARGO_MAKEFLAGS"))) .current_dir(&out_dir) .status() .expect("Failed to execute GNU make") .success()); if compile_commands { std::fs::copy( out_dir.join("compile_commands.json"), dir.join("compile_commands.json"), ) .ok(); } let mut make = gnu_make(); let output = prepare_make(&mut make) .arg("--no-print-directory") .arg("linker-flags") .arg("USE_LIBPCRE1=") .arg("USE_LIBPCRE2=") .arg("USE_NED_ALLOCATOR=") .arg("NO_ICONV=1") .args(extra_args) .current_dir(&out_dir) .output() .expect("Failed to execute GNU make"); let output = String::from_utf8(output.stdout).unwrap(); println!("cargo:rustc-link-lib=static=cinnabar"); if target_os == "windows" && target_env == "gnu" { println!("cargo:rustc-link-lib=ssp_nonshared"); println!("cargo:rustc-link-lib=static=ssp"); } for flag in output.split_whitespace() { if let Some(lib) = flag.strip_prefix("-l") { println!("cargo:rustc-link-lib={}", lib); } else if let Some(libdir) = flag.strip_prefix("-L") { println!("cargo:rustc-link-search=native={}", libdir); } } for src in fs::read_dir(dir.join("src")).unwrap() { let path = src.unwrap().path(); let name = path.file_name().unwrap().to_str().unwrap(); if (name.ends_with(".h") || 
name.ends_with(".c") || name.ends_with(".c.patch") || name.ends_with(".mk")) && !name.ends_with("patched.c") { println!("cargo:rerun-if-changed={}", path.display()); } } println!("cargo:rerun-if-env-changed=CINNABAR_MAKE_FLAGS"); } git-cinnabar-0.7.0/git-core/COPYING000064400000000000000000000445151046102023000147430ustar 00000000000000 Note that the only valid version of the GPL as far as this project is concerned is _this_ particular version of the license (ie v2, not v2.2 or v3.x or whatever), unless explicitly otherwise stated. HOWEVER, in order to allow a migration to GPLv3 if that seems like a good idea, I also ask that people involved with the project make their preferences known. In particular, if you trust me to make that decision, you might note so in your copyright message, ie something like This file is licensed under the GPL v2, or a later version at the discretion of Linus. might avoid issues. But we can also just decide to synchronize and contact all copyright holders on record if/when the occasion arises. Linus Torvalds ---------------------------------------- GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. 
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. 
GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. 
However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
git-cinnabar-0.7.0/git-core/GIT-VERSION-FILE.in000064400000000000000000000000321046102023000165050ustar 00000000000000GIT_VERSION=@GIT_VERSION@ git-cinnabar-0.7.0/git-core/GIT-VERSION-GEN000075500000000000000000000045011046102023000160020ustar 00000000000000#!/bin/sh DEF_VER=v2.48.1 LF=' ' if test "$#" -ne 3 then echo >&2 "USAGE: $0 " exit 1 fi SOURCE_DIR="$1" INPUT="$2" OUTPUT="$3" if ! test -f "$INPUT" then echo >&2 "Input is not a file: $INPUT" exit 1 fi # Protect us from reading Git version information outside of the Git directory # in case it is not a repository itself, but embedded in an unrelated # repository. GIT_CEILING_DIRECTORIES="$SOURCE_DIR/.." export GIT_CEILING_DIRECTORIES if test -z "$GIT_VERSION" then # First see if there is a version file (included in release tarballs), # then try git-describe, then default. if test -f "$SOURCE_DIR"/version then VN=$(cat "$SOURCE_DIR"/version) || VN="$DEF_VER" elif { test -d "$SOURCE_DIR/.git" || test -d "${GIT_DIR:-.git}" || test -f "$SOURCE_DIR"/.git; } && VN=$(git -C "$SOURCE_DIR" describe --match "v[0-9]*" HEAD 2>/dev/null) && case "$VN" in *$LF*) (exit 1) ;; v[0-9]*) git -C "$SOURCE_DIR" update-index -q --refresh test -z "$(git -C "$SOURCE_DIR" diff-index --name-only HEAD --)" || VN="$VN-dirty" ;; esac then VN=$(echo "$VN" | sed -e 's/-/./g'); else VN="$DEF_VER" fi GIT_VERSION=$(expr "$VN" : v*'\(.*\)') fi if test -z "$GIT_BUILT_FROM_COMMIT" then GIT_BUILT_FROM_COMMIT=$(git -C "$SOURCE_DIR" rev-parse -q --verify HEAD 2>/dev/null) fi if test -z "$GIT_DATE" then GIT_DATE=$(git -C "$SOURCE_DIR" show --quiet --format='%as' 2>/dev/null) fi if test -z "$GIT_USER_AGENT" then GIT_USER_AGENT="git/$GIT_VERSION" fi # While released Git versions only have three numbers, development builds also # have a fourth number that corresponds to the number of patches since the last # release. read GIT_MAJOR_VERSION GIT_MINOR_VERSION GIT_MICRO_VERSION GIT_PATCH_LEVEL trailing <"$OUTPUT".$$+ if ! test -f "$OUTPUT" || ! 
cmp "$OUTPUT".$$+ "$OUTPUT" >/dev/null then mv "$OUTPUT".$$+ "$OUTPUT" else rm "$OUTPUT".$$+ fi git-cinnabar-0.7.0/git-core/LGPL-2.1000064400000000000000000000643531046102023000146710ustar 00000000000000 While most of this project is under the GPL (see COPYING), the xdiff/ library and some libc code from compat/ are licensed under the GNU LGPL, version 2.1 or (at your option) any later version and some other files are under other licenses. Check the individual files to be sure. ---------------------------------------- GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. 
Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. 
For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. 
d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. 
However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. 
If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. 
You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. 
If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. 
You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! git-cinnabar-0.7.0/git-core/Makefile000064400000000000000000003651521046102023000153530ustar 00000000000000# The default target of this Makefile is... 
all:: # Import tree-wide shared Makefile behavior and libraries include shared.mak # == Makefile defines == # # These defines change the behavior of the Makefile itself, but have # no impact on what it builds: # # Define V=1 to have a more verbose compile. # # == Portability and optional library defines == # # These defines indicate what Git can expect from the OS, what # libraries are available etc. Much of this is auto-detected in # config.mak.uname, or in configure.ac when using the optional "make # configure && ./configure" (see INSTALL). # # Define SHELL_PATH to a POSIX shell if your /bin/sh is broken. # # Define SANE_TOOL_PATH to a colon-separated list of paths to prepend # to PATH if your tools in /usr/bin are broken. # # Define SOCKLEN_T to a suitable type (such as 'size_t') if your # system headers do not define a socklen_t type. # # Define INLINE to a suitable substitute (such as '__inline' or '') if git # fails to compile with errors about undefined inline functions or similar. # # Define SNPRINTF_RETURNS_BOGUS if you are on a system which snprintf() # or vsnprintf() return -1 instead of number of characters which would # have been written to the final string if enough space had been available. # # Define FREAD_READS_DIRECTORIES if you are on a system which succeeds # when attempting to read from an fopen'ed directory (or even to fopen # it at all). # # Define OPEN_RETURNS_EINTR if your open() system call may return EINTR # when a signal is received (as opposed to restarting). # # Define NO_OPENSSL environment variable if you do not have OpenSSL. # # Define HAVE_ALLOCA_H if you have working alloca(3) defined in that header. # # Define HAVE_PATHS_H if you have paths.h and want to use the default PATH # it specifies. # # Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks # d_type in struct dirent (Cygwin 1.5, fixed in Cygwin 1.7). # # Define HAVE_STRINGS_H if you have strings.h and need it for strcasecmp. 
# # Define NO_STRCASESTR if you don't have strcasestr. # # Define NO_MEMMEM if you don't have memmem. # # Define NO_GETPAGESIZE if you don't have getpagesize. # # Define NO_STRLCPY if you don't have strlcpy. # # Define NO_STRTOUMAX if you don't have both strtoimax and strtoumax in the # C library. If your compiler also does not support long long or does not have # strtoull, define NO_STRTOULL. # # Define NO_SETENV if you don't have setenv in the C library. # # Define NO_UNSETENV if you don't have unsetenv in the C library. # # Define NO_MKDTEMP if you don't have mkdtemp in the C library. # # Define MKDIR_WO_TRAILING_SLASH if your mkdir() can't deal with trailing slash. # # Define NO_GECOS_IN_PWENT if you don't have pw_gecos in struct passwd # in the C library. # # Define NO_LIBGEN_H if you don't have libgen.h. # # Define NEEDS_LIBGEN if your libgen needs -lgen when linking # # Define NO_SYS_SELECT_H if you don't have sys/select.h. # # Define NO_SYMLINK_HEAD if you never want .git/HEAD to be a symbolic link. # Enable it on Windows. By default, symrefs are still used. # # Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability # tests. These tests take up a significant amount of the total test time # but are not needed unless you plan to talk to SVN repos. # # Define NO_FINK if you are building on Darwin/Mac OS X, have Fink # installed in /sw, but don't want GIT to link against any libraries # installed there. If defined you may specify your own (or Fink's) # include directories and library directories by defining CFLAGS # and LDFLAGS appropriately. # # Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X, # have DarwinPorts installed in /opt/local, but don't want GIT to # link against any libraries installed there. If defined you may # specify your own (or DarwinPort's) include directories and # library directories by defining CFLAGS and LDFLAGS appropriately. 
# # Define NO_APPLE_COMMON_CRYPTO if you are building on Darwin/Mac OS X # and do not want to use Apple's CommonCrypto library. This allows you # to provide your own OpenSSL library, for example from MacPorts. # # Define NEEDS_CRYPTO_WITH_SSL if you need -lcrypto when using -lssl (Darwin). # # Define NEEDS_SSL_WITH_CRYPTO if you need -lssl when using -lcrypto (Darwin). # # Define NEEDS_LIBICONV if linking with libc is not enough (Darwin). # # Define NEEDS_LIBINTL_BEFORE_LIBICONV if you need libintl before libiconv. # # Define NO_INTPTR_T if you don't have intptr_t or uintptr_t. # # Define NO_UINTMAX_T if you don't have uintmax_t. # # Define NEEDS_SOCKET if linking with libc is not enough (SunOS, # Patrick Mauritz). # # Define NEEDS_RESOLV if linking with -lnsl and/or -lsocket is not enough. # Notably on Solaris hstrerror resides in libresolv and on Solaris 7 # inet_ntop and inet_pton additionally reside there. # # Define NO_MMAP if you want to avoid mmap. # # Define MMAP_PREVENTS_DELETE if a file that is currently mmapped cannot be # deleted or cannot be replaced using rename(). # # Define NO_POLL_H if you don't have poll.h. # # Define NO_SYS_POLL_H if you don't have sys/poll.h. # # Define NO_POLL if you do not have or don't want to use poll(). # This also implies NO_POLL_H and NO_SYS_POLL_H. # # Define NEEDS_SYS_PARAM_H if you need to include sys/param.h to compile, # *PLEASE* REPORT to git@vger.kernel.org if your platform needs this; # we want to know more about the issue. # # Define NO_PTHREADS if you do not have or do not want to use Pthreads. # # Define NO_PREAD if you have a problem with pread() system call (e.g. # cygwin1.dll before v1.5.22). # # Define NO_SETITIMER if you don't have setitimer() # # Define NO_STRUCT_ITIMERVAL if you don't have struct itimerval # This also implies NO_SETITIMER # # Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is # generally faster on your platform than accessing the working directory. 
# # Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support # the executable mode bit, but doesn't really do so. # # Define CSPRNG_METHOD to "arc4random" if your system has arc4random and # arc4random_buf, "libbsd" if your system has those functions from libbsd, # "getrandom" if your system has getrandom, "getentropy" if your system has # getentropy, "rtlgenrandom" for RtlGenRandom (Windows only), or "openssl" if # you'd want to use the OpenSSL CSPRNG. You may set multiple options with # spaces, in which case a suitable option will be chosen. If unset or set to # anything else, defaults to using "/dev/urandom". # # Define NEEDS_MODE_TRANSLATION if your OS strays from the typical file type # bits in mode values (e.g. z/OS defines I_SFMT to 0xFF000000 as opposed to the # usual 0xF000). # # Define NO_IPV6 if you lack IPv6 support and getaddrinfo(). # # Define NO_UNIX_SOCKETS if your system does not offer unix sockets. # # Define NO_SOCKADDR_STORAGE if your platform does not have struct # sockaddr_storage. # # Define NO_ICONV if your libc does not properly support iconv. # # Define OLD_ICONV if your library has an old iconv(), where the second # (input buffer pointer) parameter is declared with type (const char **). # # Define ICONV_OMITS_BOM if your iconv implementation does not write a # byte-order mark (BOM) when writing UTF-16 or UTF-32 and always writes in # big-endian format. # # Define NO_DEFLATE_BOUND if your zlib does not have deflateBound. # # Define NO_NORETURN if using buggy versions of gcc 4.6+ and profile feedback, # as the compiler can crash (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49299) # # Define USE_NSEC below if you want git to care about sub-second file mtimes # and ctimes. Note that you need recent glibc (at least 2.2.4) for this. On # Linux, kernel 2.6.11 or newer is required for reliable sub-second file times # on file systems with exactly 1 ns or 1 s resolution. If you intend to use Git # on other file systems (e.g. 
CEPH, CIFS, NTFS, UDF), don't enable USE_NSEC. See # Documentation/technical/racy-git.txt for details. # # Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of # "st_ctim" # # Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec" # available. This automatically turns USE_NSEC off. # # Define USE_STDEV below if you want git to care about the underlying device # change being considered an inode change from the update-index perspective. # # Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks # field that counts the on-disk footprint in 512-byte blocks. # # Define USE_ASCIIDOCTOR to use Asciidoctor instead of AsciiDoc to build the # documentation. # # Define ASCIIDOCTOR_EXTENSIONS_LAB to point to the location of the Asciidoctor # Extensions Lab if you have it available. # # Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl). # # Define NO_PERL if you do not want Perl scripts or libraries at all. # # Define NO_PERL_CPAN_FALLBACKS if you do not want to install bundled # copies of CPAN modules that serve as a fallback in case the modules # are not available on the system. This option is intended for # distributions that want to use their packaged versions of Perl # modules, instead of the fallbacks shipped with Git. # # Define NO_GITWEB if you do not want to build or install # 'gitweb'. Note that defining NO_PERL currently has the same effect # on not installing gitweb, but not on whether it's built in the # gitweb/ directory. # # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python # but /usr/bin/python2.7 or /usr/bin/python3 on some platforms). # # Define NO_PYTHON if you do not want Python scripts or libraries at all. # # Define NO_TCLTK if you do not want Tcl/Tk GUI. # # The TCL_PATH variable governs the location of the Tcl interpreter # used to optimize git-gui for your system. Only used if NO_TCLTK # is not set. Defaults to the bare 'tclsh'. 
#
# The TCLTK_PATH variable governs the location of the Tcl/Tk interpreter.
# If not set it defaults to the bare 'wish'. If it is set to the empty
# string then NO_TCLTK will be forced (this is used by configure script).
#
# Define INTERNAL_QSORT to use Git's implementation of qsort(), which
# is a simplified version of the merge sort used in glibc. This is
# recommended if Git triggers O(n^2) behavior in your platform's qsort().
#
# Define HAVE_ISO_QSORT_S if your platform provides a qsort_s() that's
# compatible with the one described in C11 Annex K.
#
# Define UNRELIABLE_FSTAT if your system's fstat does not return the same
# information on a not yet closed file that lstat would return for the same
# file after it was closed.
#
# Define OBJECT_CREATION_USES_RENAMES if your operating system has problems
# when hardlinking a file to another name and unlinking the original file right
# away (some NTFS drivers seem to zero the contents in that scenario).
#
# Define INSTALL_SYMLINKS if you prefer to have everything that can be
# symlinked between bin/ and libexec/ to use relative symlinks between
# the two. This option overrides NO_CROSS_DIRECTORY_HARDLINKS and
# NO_INSTALL_HARDLINKS which will also use symlinking by indirection
# within the same directory in some cases, INSTALL_SYMLINKS will
# always symlink to the final target directly.
#
# Define NO_CROSS_DIRECTORY_HARDLINKS if you plan to distribute the installed
# programs as a tar, where bin/ and libexec/ might be on different file systems.
#
# Define NO_INSTALL_HARDLINKS if you prefer to use either symbolic links or
# copies to install built-in git commands e.g. git-cat-file.
#
# Define SKIP_DASHED_BUILT_INS if you do not need the dashed versions of the
# built-ins to be linked/copied at all.
#
# Define USE_NED_ALLOCATOR if you want to replace the platforms default
# memory allocators with the nedmalloc allocator written by Niall Douglas.
#
# Define OVERRIDE_STRDUP to override the libc version of strdup(3).
# This is necessary when using a custom allocator in order to avoid # crashes due to allocation and free working on different 'heaps'. # It's defined automatically if USE_NED_ALLOCATOR is set. # # Define NO_REGEX if your C library lacks regex support with REG_STARTEND # feature. # # Define USE_ENHANCED_BASIC_REGULAR_EXPRESSIONS if your C library provides # the flag REG_ENHANCED and you'd like to use it to enable enhanced basic # regular expressions. # # Define HAVE_DEV_TTY if your system can open /dev/tty to interact with the # user. # # Define JSMIN to point to JavaScript minifier that functions as # a filter to have gitweb.js minified. # # Define CSSMIN to point to a CSS minifier in order to generate a minified # version of gitweb.css # # Define DEFAULT_PAGER to a sensible pager command (defaults to "less") if # you want to use something different. The value will be interpreted by the # shell at runtime when it is used. # # Define DEFAULT_EDITOR to a sensible editor command (defaults to "vi") if you # want to use something different. The value will be interpreted by the shell # if necessary when it is used. Examples: # # DEFAULT_EDITOR='~/bin/vi', # DEFAULT_EDITOR='$GIT_FALLBACK_EDITOR', # DEFAULT_EDITOR='"C:\Program Files\Vim\gvim.exe" --nofork' # # Define COMPUTE_HEADER_DEPENDENCIES to "yes" if you want dependencies on # header files to be automatically computed, to avoid rebuilding objects when # an unrelated header file changes. Define it to "no" to use the hard-coded # dependency rules. The default is "auto", which means to use computed header # dependencies if your compiler is detected to support it. # # Define NATIVE_CRLF if your platform uses CRLF for line endings. # # Define GIT_USER_AGENT if you want to change how git identifies itself during # network interactions. The default is "git/$(GIT_VERSION)". 
#
# Define DEFAULT_HELP_FORMAT to "man", "info" or "html"
# (defaults to "man") if you want to have a different default when
# "git help" is called without a parameter specifying the format.
#
# Define GIT_TEST_INDEX_VERSION to 2, 3 or 4 to run the test suite
# with a different indexfile format version. If it isn't set the index
# file format used is index-v[23].
#
# Define GIT_TEST_UTF8_LOCALE to preferred utf-8 locale for testing.
# If it isn't set, fallback to $LC_ALL, $LANG or use the first utf-8
# locale returned by "locale -a".
#
# Define HAVE_CLOCK_GETTIME if your platform has clock_gettime.
#
# Define HAVE_CLOCK_MONOTONIC if your platform has CLOCK_MONOTONIC.
#
# Define HAVE_SYNC_FILE_RANGE if your platform has sync_file_range.
#
# Define NEEDS_LIBRT if your platform requires linking with librt (glibc version
# before 2.17) for clock_gettime and CLOCK_MONOTONIC.
#
# Define HAVE_BSD_SYSCTL if your platform has a BSD-compatible sysctl function.
#
# Define HAVE_GETDELIM if your system has the getdelim() function.
#
# Define FILENO_IS_A_MACRO if fileno() is a macro, not a real function.
#
# Define NEED_ACCESS_ROOT_HANDLER if access() under root may succeed for X_OK
# even if execution permission isn't granted for any user.
#
# Define PAGER_ENV to a SP separated VAR=VAL pairs to define
# default environment variables to be passed when a pager is spawned, e.g.
#
# PAGER_ENV = LESS=FRX LV=-c
#
# to say "export LESS=FRX (and LV=-c) if the environment variable
# LESS (and LV) is not set, respectively".
#
# Define TEST_SHELL_PATH if you want to use a shell besides SHELL_PATH for
# running the test scripts (e.g., bash has better support for "set -x"
# tracing).
#
# When cross-compiling, define HOST_CPU as the canonical name of the CPU on
# which the built Git will run (for instance "x86_64").
# # Define RUNTIME_PREFIX to configure Git to resolve its ancillary tooling and # support files relative to the location of the runtime binary, rather than # hard-coding them into the binary. Git installations built with RUNTIME_PREFIX # can be moved to arbitrary filesystem locations. RUNTIME_PREFIX also causes # Perl scripts to use a modified entry point header allowing them to resolve # support files at runtime. # # When using RUNTIME_PREFIX, define HAVE_BSD_KERN_PROC_SYSCTL if your platform # supports the KERN_PROC BSD sysctl function. # # When using RUNTIME_PREFIX, define PROCFS_EXECUTABLE_PATH if your platform # mounts a "procfs" filesystem capable of resolving the path of the current # executable. If defined, this must be the canonical path for the "procfs" # current executable path. # # When using RUNTIME_PREFIX, define HAVE_NS_GET_EXECUTABLE_PATH if your platform # supports calling _NSGetExecutablePath to retrieve the path of the running # executable. # # When using RUNTIME_PREFIX, define HAVE_ZOS_GET_EXECUTABLE_PATH if your platform # supports calling __getprogramdir and getprogname to retrieve the path of the # running executable. # # When using RUNTIME_PREFIX, define HAVE_WPGMPTR if your platform offers # the global variable _wpgmptr containing the absolute path of the current # executable (this is the case on Windows). # # INSTALL_STRIP can be set to "-s" to strip binaries during installation, # if your $(INSTALL) command supports the option. # # Define GENERATE_COMPILATION_DATABASE to "yes" to generate JSON compilation # database entries during compilation if your compiler supports it, using the # `-MJ` flag. The JSON entries will be placed in the `compile_commands/` # directory, and the JSON compilation database 'compile_commands.json' will be # created at the root of the repository. 
#
# If your platform supports a built-in fsmonitor backend, set
# FSMONITOR_DAEMON_BACKEND to the "<name>" of the corresponding
# `compat/fsmonitor/fsm-listen-<name>.c` and
# `compat/fsmonitor/fsm-health-<name>.c` files
# that implement the `fsm_listen__*()` and `fsm_health__*()` routines.
#
# If your platform has OS-specific ways to tell if a repo is incompatible with
# fsmonitor (whether the hook or IPC daemon version), set FSMONITOR_OS_SETTINGS
# to the "<name>" of the corresponding `compat/fsmonitor/fsm-settings-<name>.c`
# that implements the `fsm_os_settings__*()` routines.
#
# Define LINK_FUZZ_PROGRAMS if you want `make all` to also build the fuzz test
# programs in oss-fuzz/.
#
# === Optional library: libintl ===
#
# Define NO_GETTEXT if you don't want Git output to be translated.
# A translated Git requires GNU libintl or another gettext implementation,
# plus libintl-perl at runtime.
#
# Define USE_GETTEXT_SCHEME and set it to 'fallthrough', if you don't trust
# the installed gettext translation of the shell scripts output.
#
# Define HAVE_LIBCHARSET_H if you haven't set NO_GETTEXT and you can't
# trust the langinfo.h's nl_langinfo(CODESET) function to return the
# current character set. GNU and Solaris have a nl_langinfo(CODESET),
# FreeBSD can use either, but MinGW and some others need to use
# libcharset.h's locale_charset() instead.
#
# Define CHARSET_LIB to the library you need to link with in order to
# use locale_charset() function. On some platforms this needs to be set to
# -lcharset, on others to -liconv .
#
# Define LIBC_CONTAINS_LIBINTL if your gettext implementation doesn't
# need -lintl when linking.
#
# Define NO_MSGFMT_EXTENDED_OPTIONS if your implementation of msgfmt
# doesn't support GNU extensions like --check and --statistics
#
# === Optional library: libexpat ===
#
# Define NO_EXPAT if you do not have expat installed. git-http-push is
# not built, and you cannot push using http:// and https:// transports (dumb).
# # Define EXPATDIR=/foo/bar if your expat header and library files are in # /foo/bar/include and /foo/bar/lib directories. # # Define EXPAT_NEEDS_XMLPARSE_H if you have an old version of expat (e.g., # 1.1 or 1.2) that provides xmlparse.h instead of expat.h. # === Optional library: libcurl === # # Define NO_CURL if you do not have libcurl installed. git-http-fetch and # git-http-push are not built, and you cannot use http:// and https:// # transports (neither smart nor dumb). # # Define CURLDIR=/foo/bar if your curl header and library files are in # /foo/bar/include and /foo/bar/lib directories. # # Define CURL_CONFIG to curl's configuration program that prints information # about the library (e.g., its version number). The default is 'curl-config'. # # Define CURL_LDFLAGS to specify flags that you need to link when using libcurl, # if you do not want to rely on the libraries provided by CURL_CONFIG. The # default value is a result of `curl-config --libs`. An example value for # CURL_LDFLAGS is as follows: # # CURL_LDFLAGS=-lcurl # # Define LAZYLOAD_LIBCURL to dynamically load the libcurl; This can be useful # if Multiple libcurl versions exist (with different file names) that link to # various SSL/TLS backends, to support the `http.sslBackend` runtime switch in # such a scenario. # # === Optional library: libpcre2 === # # Define USE_LIBPCRE if you have and want to use libpcre. Various # commands such as log and grep offer runtime options to use # Perl-compatible regular expressions instead of standard or extended # POSIX regular expressions. # # Only libpcre version 2 is supported. USE_LIBPCRE2 is a synonym for # USE_LIBPCRE, support for the old USE_LIBPCRE1 has been removed. # # Define LIBPCREDIR=/foo/bar if your PCRE header and library files are # in /foo/bar/include and /foo/bar/lib directories. 
#
# == SHA-1 and SHA-256 defines ==
#
# === SHA-1 backend ===
#
# ==== Security ====
#
# Due to the SHAttered (https://shattered.io) attack vector on SHA-1
# it's strongly recommended to use the sha1collisiondetection
# counter-cryptanalysis library for SHA-1 hashing.
#
# If you know that you can trust the repository contents, or where
# potential SHA-1 attacks are otherwise mitigated the other backends
# listed in "SHA-1 implementations" are faster than
# sha1collisiondetection.
#
# ==== Default SHA-1 backend ====
#
# If no *_SHA1 backend is picked, the first supported one listed in
# "SHA-1 implementations" will be picked.
#
# ==== Options common to all SHA-1 implementations ====
#
# Define SHA1_MAX_BLOCK_SIZE to limit the amount of data that will be hashed
# in one call to the platform's SHA1_Update(). e.g. APPLE_COMMON_CRYPTO
# wants 'SHA1_MAX_BLOCK_SIZE=1024L*1024L*1024L' defined.
#
# ==== SHA-1 implementations ====
#
# Define OPENSSL_SHA1 to link to the SHA-1 routines from the OpenSSL
# library.
#
# Define BLK_SHA1 to make use of optimized C SHA-1 routines bundled
# with git (in the block-sha1/ directory).
#
# Define APPLE_COMMON_CRYPTO_SHA1 to use Apple's CommonCrypto for
# SHA-1.
#
# Define the same Makefile knobs as above, but suffixed with _UNSAFE to
# use the corresponding implementations for unsafe SHA-1 hashing for
# non-cryptographic purposes.
#
# If you don't enable any of the *_SHA1 settings in this section, Git will
# default to its built-in sha1collisiondetection library, which is a
# collision-detecting sha1. This is slower, but may detect attempted
# collision attacks.
#
# ==== Options for the sha1collisiondetection library ====
#
# Define DC_SHA1_EXTERNAL if you want to build / link
# git with the external SHA1 collision-detect library.
# Without this option, i.e. the default behavior is to build git with its
# own built-in code (or submodule).
#
# Define DC_SHA1_SUBMODULE to use the
# sha1collisiondetection shipped as a submodule instead of the
# non-submodule copy in sha1dc/. This is an experimental option used
# by the git project to migrate to using sha1collisiondetection as a
# submodule.
#
# === SHA-256 backend ===
#
# ==== Security ====
#
# Unlike SHA-1 the SHA-256 algorithm does not suffer from any known
# vulnerabilities, so any implementation will do.
#
# ==== SHA-256 implementations ====
#
# Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
#
# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle.
#
# Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
#
# If you don't enable any of the *_SHA256 settings in this section, Git
# will default to its built-in sha256 implementation.
#
# == DEVELOPER defines ==
#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
# COMPILER_FEATURES (see config.mak.dev). You can still set
# CFLAGS="..." in combination with DEVELOPER enables, whether that's
# for tweaking something unrelated (e.g. optimization level), or for
# selectively overriding something DEVELOPER or one of the DEVOPTS
# (see just below) brings in.
#
# When DEVELOPER is set, DEVOPTS can be used to control compiler
# options. This variable contains keywords separated by
# whitespace. The following keywords are recognized:
#
#    no-error:
#
#        suppresses the -Werror that implicitly comes with
#        DEVELOPER=1. Useful for getting the full set of errors
#        without immediately dying, or for logging them.
#
#    extra-all:
#
#        The DEVELOPER mode enables -Wextra with a few exceptions. By
#        setting this flag the exceptions are removed, and all of
#        -Wextra is used.
#
#    no-pedantic:
#
#        Disable -pedantic compilation.

# Set our default configuration.
#
# Among the variables below, these:
#   gitexecdir
#   template_dir
#   sysconfdir
# can be specified as a relative path some/where/else;
# this is interpreted as relative to $(prefix) and "git" built with
# RUNTIME_PREFIX flag will figure out (at runtime) where they are
# based on the path to the executable.
# Additionally, the following will be treated as relative by "git" if they
# begin with "$(prefix)/":
#   mandir
#   infodir
#   htmldir
#   localedir
#   perllibdir
# This can help installing the suite in a relocatable way.

# Default installation layout; each of these may be overridden on the
# make command line or in a config.mak-style file.
prefix = $(HOME)
bindir = $(prefix)/bin
mandir = $(prefix)/share/man
infodir = $(prefix)/share/info
gitexecdir = libexec/git-core
mergetoolsdir = $(gitexecdir)/mergetools
sharedir = $(prefix)/share
gitwebdir = $(sharedir)/gitweb
gitwebstaticdir = $(gitwebdir)/static
perllibdir = $(sharedir)/perl5
localedir = $(sharedir)/locale
template_dir = share/git-core/templates
htmldir = $(prefix)/share/doc/git-doc
ETC_GITCONFIG = $(sysconfdir)/gitconfig
ETC_GITATTRIBUTES = $(sysconfdir)/gitattributes
lib = lib
# DESTDIR =
pathsep = :

# Strip a leading "$(prefix)/" to obtain each directory relative to the
# prefix (leaves the value unchanged when it does not start with the prefix).
bindir_relative = $(patsubst $(prefix)/%,%,$(bindir))
mandir_relative = $(patsubst $(prefix)/%,%,$(mandir))
infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
gitexecdir_relative = $(patsubst $(prefix)/%,%,$(gitexecdir))
localedir_relative = $(patsubst $(prefix)/%,%,$(localedir))
htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
perllibdir_relative = $(patsubst $(prefix)/%,%,$(perllibdir))

export prefix bindir sharedir sysconfdir perllibdir localedir

# Set our default programs
CC = cc
AR = ar
RM = rm -f
DIFF = diff
TAR = tar
FIND = find
INSTALL = install
TCL_PATH = tclsh
TCLTK_PATH = wish
XGETTEXT = xgettext
MSGCAT = msgcat
MSGFMT = msgfmt
MSGMERGE = msgmerge
CURL_CONFIG = curl-config
GCOV = gcov
STRIP = strip
SPATCH = spatch

export TCL_PATH TCLTK_PATH

# Set our default LIBS variables
PTHREAD_LIBS = -lpthread

# Guard against environment variables: these accumulators are extended
# with "+=" later in the file, so they must start out empty.
BUILTIN_OBJS =
BUILT_INS =
COMPAT_CFLAGS =
COMPAT_OBJS =
XDIFF_OBJS =
# Guard against environment variables (continued from the list above):
# start every accumulator empty before the "+=" additions below.
GENERATED_H =
EXTRA_CPPFLAGS =
FUZZ_OBJS =
FUZZ_PROGRAMS =
GIT_OBJS =
LIB_OBJS =
SCALAR_OBJS =
OBJECTS =
OTHER_PROGRAMS =
PROGRAM_OBJS =
PROGRAMS =
EXCLUDED_PROGRAMS =
SCRIPT_PERL =
SCRIPT_PYTHON =
SCRIPT_SH =
SCRIPT_LIB =
TEST_BUILTINS_OBJS =
TEST_OBJS =
TEST_PROGRAMS_NEED_X =
THIRD_PARTY_SOURCES =
UNIT_TEST_PROGRAMS =
UNIT_TEST_DIR = t/unit-tests
UNIT_TEST_BIN = $(UNIT_TEST_DIR)/bin

# Having this variable in your environment would break pipelines because
# you cause "cd" to echo its destination to stdout. It can also take
# scripts to unexpected places. If you like CDPATH, define it for your
# interactive shell sessions without exporting it.
unexport CDPATH

# Shell scripts shipped as git subcommands (the .sh suffix is stripped
# by SCRIPT_SH_GEN below).
SCRIPT_SH += git-difftool--helper.sh
SCRIPT_SH += git-filter-branch.sh
SCRIPT_SH += git-merge-octopus.sh
SCRIPT_SH += git-merge-one-file.sh
SCRIPT_SH += git-merge-resolve.sh
SCRIPT_SH += git-mergetool.sh
SCRIPT_SH += git-quiltimport.sh
SCRIPT_SH += git-request-pull.sh
SCRIPT_SH += git-submodule.sh
SCRIPT_SH += git-web--browse.sh

# Shell library files sourced by the scripts above (no suffix to strip).
SCRIPT_LIB += git-mergetool--lib
SCRIPT_LIB += git-sh-i18n
SCRIPT_LIB += git-sh-setup

SCRIPT_PERL += git-archimport.perl
SCRIPT_PERL += git-cvsexportcommit.perl
SCRIPT_PERL += git-cvsimport.perl
SCRIPT_PERL += git-cvsserver.perl
SCRIPT_PERL += git-send-email.perl
SCRIPT_PERL += git-svn.perl

SCRIPT_PYTHON += git-p4.py

# Generated files for scripts
SCRIPT_SH_GEN = $(patsubst %.sh,%,$(SCRIPT_SH))
SCRIPT_PERL_GEN = $(patsubst %.perl,%,$(SCRIPT_PERL))
SCRIPT_PYTHON_GEN = $(patsubst %.py,%,$(SCRIPT_PYTHON))

# Individual rules to allow e.g.
# "make -C ../.. SCRIPT_PERL=contrib/foo/bar.perl build-perl-script"
# from subdirectories like contrib/*/
.PHONY: build-perl-script build-sh-script build-python-script
build-perl-script: $(SCRIPT_PERL_GEN)
build-sh-script: $(SCRIPT_SH_GEN)
build-python-script: $(SCRIPT_PYTHON_GEN)

.PHONY: install-perl-script install-sh-script install-python-script
install-sh-script: $(SCRIPT_SH_GEN)
	$(INSTALL) $^ '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
install-perl-script: $(SCRIPT_PERL_GEN)
	$(INSTALL) $^ '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
install-python-script: $(SCRIPT_PYTHON_GEN)
	$(INSTALL) $^ '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'

.PHONY: clean-perl-script clean-sh-script clean-python-script
clean-sh-script:
	$(RM) $(SCRIPT_SH_GEN)
clean-perl-script:
	$(RM) $(SCRIPT_PERL_GEN)
clean-python-script:
	$(RM) $(SCRIPT_PYTHON_GEN)

SCRIPTS = $(SCRIPT_SH_GEN) \
	  $(SCRIPT_PERL_GEN) \
	  $(SCRIPT_PYTHON_GEN) \
	  git-instaweb

ETAGS_TARGET = TAGS

# Empty...
EXTRA_PROGRAMS =

# ... and all the rest that could be moved out of bindir to gitexecdir
PROGRAMS += $(EXTRA_PROGRAMS)

PROGRAM_OBJS += daemon.o
PROGRAM_OBJS += http-backend.o
PROGRAM_OBJS += imap-send.o
PROGRAM_OBJS += sh-i18n--envsubst.o
PROGRAM_OBJS += shell.o

.PHONY: program-objs
program-objs: $(PROGRAM_OBJS)

# Binary suffix, set to .exe for Windows builds
X =

# Each stand-alone program object becomes a "git-<name>$X" executable.
PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))

# Objects for the test helpers under t/helper/ (list continues below).
TEST_BUILTINS_OBJS += test-advise.o
TEST_BUILTINS_OBJS += test-bitmap.o
TEST_BUILTINS_OBJS += test-bloom.o
TEST_BUILTINS_OBJS += test-bundle-uri.o
TEST_BUILTINS_OBJS += test-cache-tree.o
TEST_BUILTINS_OBJS += test-chmtime.o
TEST_BUILTINS_OBJS += test-config.o
TEST_BUILTINS_OBJS += test-crontab.o
TEST_BUILTINS_OBJS += test-csprng.o
TEST_BUILTINS_OBJS += test-date.o
TEST_BUILTINS_OBJS += test-delete-gpgsig.o
TEST_BUILTINS_OBJS += test-delta.o
TEST_BUILTINS_OBJS += test-dir-iterator.o
TEST_BUILTINS_OBJS += test-drop-caches.o
TEST_BUILTINS_OBJS += test-dump-cache-tree.o
TEST_BUILTINS_OBJS += test-dump-fsmonitor.o
TEST_BUILTINS_OBJS += test-dump-split-index.o
# Test helper objects (continuation of the TEST_BUILTINS_OBJS list above).
TEST_BUILTINS_OBJS += test-dump-untracked-cache.o
TEST_BUILTINS_OBJS += test-env-helper.o
TEST_BUILTINS_OBJS += test-example-tap.o
TEST_BUILTINS_OBJS += test-find-pack.o
TEST_BUILTINS_OBJS += test-fsmonitor-client.o
TEST_BUILTINS_OBJS += test-genrandom.o
TEST_BUILTINS_OBJS += test-genzeros.o
TEST_BUILTINS_OBJS += test-getcwd.o
TEST_BUILTINS_OBJS += test-hash-speed.o
TEST_BUILTINS_OBJS += test-hash.o
TEST_BUILTINS_OBJS += test-hashmap.o
TEST_BUILTINS_OBJS += test-hexdump.o
TEST_BUILTINS_OBJS += test-iconv.o
TEST_BUILTINS_OBJS += test-json-writer.o
TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
TEST_BUILTINS_OBJS += test-match-trees.o
TEST_BUILTINS_OBJS += test-mergesort.o
TEST_BUILTINS_OBJS += test-mktemp.o
TEST_BUILTINS_OBJS += test-name-hash.o
TEST_BUILTINS_OBJS += test-online-cpus.o
TEST_BUILTINS_OBJS += test-pack-mtimes.o
TEST_BUILTINS_OBJS += test-parse-options.o
TEST_BUILTINS_OBJS += test-parse-pathspec-file.o
TEST_BUILTINS_OBJS += test-partial-clone.o
TEST_BUILTINS_OBJS += test-path-utils.o
TEST_BUILTINS_OBJS += test-path-walk.o
TEST_BUILTINS_OBJS += test-pcre2-config.o
TEST_BUILTINS_OBJS += test-pkt-line.o
TEST_BUILTINS_OBJS += test-proc-receive.o
TEST_BUILTINS_OBJS += test-progress.o
TEST_BUILTINS_OBJS += test-reach.o
TEST_BUILTINS_OBJS += test-read-cache.o
TEST_BUILTINS_OBJS += test-read-graph.o
TEST_BUILTINS_OBJS += test-read-midx.o
TEST_BUILTINS_OBJS += test-ref-store.o
TEST_BUILTINS_OBJS += test-reftable.o
TEST_BUILTINS_OBJS += test-regex.o
TEST_BUILTINS_OBJS += test-rot13-filter.o
TEST_BUILTINS_OBJS += test-repository.o
TEST_BUILTINS_OBJS += test-revision-walking.o
TEST_BUILTINS_OBJS += test-run-command.o
TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
TEST_BUILTINS_OBJS += test-serve-v2.o
TEST_BUILTINS_OBJS += test-sha1.o
TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-sigchain.o
TEST_BUILTINS_OBJS += test-simple-ipc.o
TEST_BUILTINS_OBJS += test-string-list.o
TEST_BUILTINS_OBJS += test-submodule-config.o
TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o
TEST_BUILTINS_OBJS += test-submodule.o
TEST_BUILTINS_OBJS += test-subprocess.o
TEST_BUILTINS_OBJS += test-trace2.o
TEST_BUILTINS_OBJS += test-truncate.o
TEST_BUILTINS_OBJS += test-userdiff.o
TEST_BUILTINS_OBJS += test-wildmatch.o
TEST_BUILTINS_OBJS += test-windows-named-pipe.o
TEST_BUILTINS_OBJS += test-write-cache.o
TEST_BUILTINS_OBJS += test-xml-encode.o

# Do not add more tests here unless they have extra dependencies. Add
# them in TEST_BUILTINS_OBJS above.
TEST_PROGRAMS_NEED_X += test-fake-ssh
TEST_PROGRAMS_NEED_X += test-tool

TEST_PROGRAMS = $(patsubst %,t/helper/%$X,$(TEST_PROGRAMS_NEED_X))

# List built-in command $C whose implementation cmd_$C() is not in
# builtin/$C.o but is linked in as part of some other command.
BUILT_INS += $(patsubst builtin/%.o,git-%$X,$(BUILTIN_OBJS))

# Built-ins that are aliases of (or variants implemented inside) another
# command rather than having their own builtin/<name>.o.
BUILT_INS += git-cherry$X
BUILT_INS += git-cherry-pick$X
BUILT_INS += git-format-patch$X
BUILT_INS += git-fsck-objects$X
BUILT_INS += git-init$X
BUILT_INS += git-maintenance$X
BUILT_INS += git-merge-subtree$X
BUILT_INS += git-restore$X
BUILT_INS += git-show$X
BUILT_INS += git-stage$X
BUILT_INS += git-status$X
BUILT_INS += git-switch$X
BUILT_INS += git-version$X
BUILT_INS += git-whatchanged$X

# what 'all' will build but not install in gitexecdir
OTHER_PROGRAMS += git$X
OTHER_PROGRAMS += scalar$X

# what test wrappers are needed and 'install' will install, in bindir
BINDIR_PROGRAMS_NEED_X += git
BINDIR_PROGRAMS_NEED_X += scalar
BINDIR_PROGRAMS_NEED_X += git-receive-pack
BINDIR_PROGRAMS_NEED_X += git-shell
BINDIR_PROGRAMS_NEED_X += git-upload-archive
BINDIR_PROGRAMS_NEED_X += git-upload-pack

BINDIR_PROGRAMS_NO_X += git-cvsserver

# Set paths to tools early so that they can be used for version tests.
# Interpreter defaults; honored only when not already set by the caller.
ifndef SHELL_PATH
	SHELL_PATH = /bin/sh
endif
ifndef PERL_PATH
	PERL_PATH = /usr/bin/perl
endif
ifndef PYTHON_PATH
	PYTHON_PATH = /usr/bin/python
endif

export PERL_PATH
export PYTHON_PATH

TEST_SHELL_PATH = $(SHELL_PATH)

LIB_FILE = libgit.a
XDIFF_LIB = xdiff/lib.a
REFTABLE_LIB = reftable/libreftable.a

GENERATED_H += command-list.h
GENERATED_H += config-list.h
GENERATED_H += hook-list.h
GENERATED_H += $(UNIT_TEST_DIR)/clar-decls.h
GENERATED_H += $(UNIT_TEST_DIR)/clar.suite

.PHONY: generated-hdrs
generated-hdrs: $(GENERATED_H)

## Exhaustive lists of our source files, either dynamically generated,
## or hardcoded.
# Prefer "git ls-files" (errors silenced so this works outside a
# repository); fall back to find(1) with equivalent prune patterns.
SOURCES_CMD = ( \
	git ls-files --deduplicate \
		'*.[hcS]' \
		'*.sh' \
		':!*[tp][0-9][0-9][0-9][0-9]*' \
		':!contrib' \
		2>/dev/null || \
	$(FIND) . \
		\( -name .git -type d -prune \) \
		-o \( -name '[tp][0-9][0-9][0-9][0-9]*' -prune \) \
		-o \( -name contrib -type d -prune \) \
		-o \( -name build -type d -prune \) \
		-o \( -name .build -type d -prune \) \
		-o \( -name 'trash*' -type d -prune \) \
		-o \( -name '*.[hcS]' -type f -print \) \
		-o \( -name '*.sh' -type f -print \) \
	| sed -e 's|^\./||' \
	)
FOUND_SOURCE_FILES := $(filter-out $(GENERATED_H),$(shell $(SOURCES_CMD)))

FOUND_C_SOURCES = $(filter %.c,$(FOUND_SOURCE_FILES))
FOUND_H_SOURCES = $(filter %.h,$(FOUND_SOURCE_FILES))
COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES))

LIB_H = $(FOUND_H_SOURCES)

# Library objects — NOTE(review): presumably archived into $(LIB_FILE);
# the rule that consumes LIB_OBJS is outside this chunk. List continues
# below.
LIB_OBJS += abspath.o
LIB_OBJS += add-interactive.o
LIB_OBJS += add-patch.o
LIB_OBJS += advice.o
LIB_OBJS += alias.o
LIB_OBJS += alloc.o
LIB_OBJS += apply.o
LIB_OBJS += archive-tar.o
LIB_OBJS += archive-zip.o
LIB_OBJS += archive.o
LIB_OBJS += attr.o
LIB_OBJS += base85.o
LIB_OBJS += bisect.o
LIB_OBJS += blame.o
LIB_OBJS += blob.o
LIB_OBJS += bloom.o
LIB_OBJS += branch.o
LIB_OBJS += bulk-checkin.o
LIB_OBJS += bundle-uri.o
LIB_OBJS += bundle.o
LIB_OBJS += cache-tree.o
LIB_OBJS += cbtree.o
LIB_OBJS += chdir-notify.o
LIB_OBJS += checkout.o
LIB_OBJS += chunk-format.o
LIB_OBJS += color.o
LIB_OBJS += column.o
LIB_OBJS += combine-diff.o LIB_OBJS += commit-graph.o LIB_OBJS += commit-reach.o LIB_OBJS += commit.o LIB_OBJS += compat/nonblock.o LIB_OBJS += compat/obstack.o LIB_OBJS += compat/terminal.o LIB_OBJS += compat/zlib-uncompress2.o LIB_OBJS += config.o LIB_OBJS += connect.o LIB_OBJS += connected.o LIB_OBJS += convert.o LIB_OBJS += copy.o LIB_OBJS += credential.o LIB_OBJS += csum-file.o LIB_OBJS += ctype.o LIB_OBJS += date.o LIB_OBJS += decorate.o LIB_OBJS += delta-islands.o LIB_OBJS += diagnose.o LIB_OBJS += diff-delta.o LIB_OBJS += diff-merges.o LIB_OBJS += diff-lib.o LIB_OBJS += diff-no-index.o LIB_OBJS += diff.o LIB_OBJS += diffcore-break.o LIB_OBJS += diffcore-delta.o LIB_OBJS += diffcore-order.o LIB_OBJS += diffcore-pickaxe.o LIB_OBJS += diffcore-rename.o LIB_OBJS += diffcore-rotate.o LIB_OBJS += dir-iterator.o LIB_OBJS += dir.o LIB_OBJS += editor.o LIB_OBJS += entry.o LIB_OBJS += environment.o LIB_OBJS += ewah/bitmap.o LIB_OBJS += ewah/ewah_bitmap.o LIB_OBJS += ewah/ewah_io.o LIB_OBJS += ewah/ewah_rlw.o LIB_OBJS += exec-cmd.o LIB_OBJS += fetch-negotiator.o LIB_OBJS += fetch-pack.o LIB_OBJS += fmt-merge-msg.o LIB_OBJS += fsck.o LIB_OBJS += fsmonitor.o LIB_OBJS += fsmonitor-ipc.o LIB_OBJS += fsmonitor-settings.o LIB_OBJS += gettext.o LIB_OBJS += git-zlib.o LIB_OBJS += gpg-interface.o LIB_OBJS += graph.o LIB_OBJS += grep.o LIB_OBJS += hash-lookup.o LIB_OBJS += hashmap.o LIB_OBJS += help.o LIB_OBJS += hex.o LIB_OBJS += hex-ll.o LIB_OBJS += hook.o LIB_OBJS += ident.o LIB_OBJS += json-writer.o LIB_OBJS += kwset.o LIB_OBJS += levenshtein.o LIB_OBJS += line-log.o LIB_OBJS += line-range.o LIB_OBJS += linear-assignment.o LIB_OBJS += list-objects-filter-options.o LIB_OBJS += list-objects-filter.o LIB_OBJS += list-objects.o LIB_OBJS += lockfile.o LIB_OBJS += log-tree.o LIB_OBJS += loose.o LIB_OBJS += ls-refs.o LIB_OBJS += mailinfo.o LIB_OBJS += mailmap.o LIB_OBJS += match-trees.o LIB_OBJS += mem-pool.o LIB_OBJS += merge-blobs.o LIB_OBJS += merge-ll.o LIB_OBJS += merge-ort.o 
LIB_OBJS += merge-ort-wrappers.o LIB_OBJS += merge-recursive.o LIB_OBJS += merge.o LIB_OBJS += midx.o LIB_OBJS += midx-write.o LIB_OBJS += name-hash.o LIB_OBJS += negotiator/default.o LIB_OBJS += negotiator/noop.o LIB_OBJS += negotiator/skipping.o LIB_OBJS += notes-cache.o LIB_OBJS += notes-merge.o LIB_OBJS += notes-utils.o LIB_OBJS += notes.o LIB_OBJS += object-file-convert.o LIB_OBJS += object-file.o LIB_OBJS += object-name.o LIB_OBJS += object.o LIB_OBJS += oid-array.o LIB_OBJS += oidmap.o LIB_OBJS += oidset.o LIB_OBJS += oidtree.o LIB_OBJS += pack-bitmap-write.o LIB_OBJS += pack-bitmap.o LIB_OBJS += pack-check.o LIB_OBJS += pack-mtimes.o LIB_OBJS += pack-objects.o LIB_OBJS += pack-revindex.o LIB_OBJS += pack-write.o LIB_OBJS += packfile.o LIB_OBJS += pager.o LIB_OBJS += parallel-checkout.o LIB_OBJS += parse.o LIB_OBJS += parse-options-cb.o LIB_OBJS += parse-options.o LIB_OBJS += patch-delta.o LIB_OBJS += patch-ids.o LIB_OBJS += path.o LIB_OBJS += path-walk.o LIB_OBJS += pathspec.o LIB_OBJS += pkt-line.o LIB_OBJS += preload-index.o LIB_OBJS += pretty.o LIB_OBJS += prio-queue.o LIB_OBJS += progress.o LIB_OBJS += promisor-remote.o LIB_OBJS += prompt.o LIB_OBJS += protocol.o LIB_OBJS += protocol-caps.o LIB_OBJS += prune-packed.o LIB_OBJS += pseudo-merge.o LIB_OBJS += quote.o LIB_OBJS += range-diff.o LIB_OBJS += reachable.o LIB_OBJS += read-cache.o LIB_OBJS += rebase-interactive.o LIB_OBJS += rebase.o LIB_OBJS += ref-filter.o LIB_OBJS += reflog-walk.o LIB_OBJS += reflog.o LIB_OBJS += refs.o LIB_OBJS += refs/debug.o LIB_OBJS += refs/files-backend.o LIB_OBJS += refs/reftable-backend.o LIB_OBJS += refs/iterator.o LIB_OBJS += refs/packed-backend.o LIB_OBJS += refs/ref-cache.o LIB_OBJS += refspec.o LIB_OBJS += remote.o LIB_OBJS += replace-object.o LIB_OBJS += repo-settings.o LIB_OBJS += repository.o LIB_OBJS += rerere.o LIB_OBJS += reset.o LIB_OBJS += resolve-undo.o LIB_OBJS += revision.o LIB_OBJS += run-command.o LIB_OBJS += send-pack.o LIB_OBJS += sequencer.o LIB_OBJS 
+= serve.o LIB_OBJS += server-info.o LIB_OBJS += setup.o LIB_OBJS += shallow.o LIB_OBJS += sideband.o LIB_OBJS += sigchain.o LIB_OBJS += sparse-index.o LIB_OBJS += split-index.o LIB_OBJS += stable-qsort.o LIB_OBJS += statinfo.o LIB_OBJS += strbuf.o LIB_OBJS += streaming.o LIB_OBJS += string-list.o LIB_OBJS += strmap.o LIB_OBJS += strvec.o LIB_OBJS += sub-process.o LIB_OBJS += submodule-config.o LIB_OBJS += submodule.o LIB_OBJS += symlinks.o LIB_OBJS += tag.o LIB_OBJS += tempfile.o LIB_OBJS += thread-utils.o LIB_OBJS += tmp-objdir.o LIB_OBJS += trace.o LIB_OBJS += trace2.o LIB_OBJS += trace2/tr2_cfg.o LIB_OBJS += trace2/tr2_cmd_name.o LIB_OBJS += trace2/tr2_ctr.o LIB_OBJS += trace2/tr2_dst.o LIB_OBJS += trace2/tr2_sid.o LIB_OBJS += trace2/tr2_sysenv.o LIB_OBJS += trace2/tr2_tbuf.o LIB_OBJS += trace2/tr2_tgt_event.o LIB_OBJS += trace2/tr2_tgt_normal.o LIB_OBJS += trace2/tr2_tgt_perf.o LIB_OBJS += trace2/tr2_tls.o LIB_OBJS += trace2/tr2_tmr.o LIB_OBJS += trailer.o LIB_OBJS += transport-helper.o LIB_OBJS += transport.o LIB_OBJS += tree-diff.o LIB_OBJS += tree-walk.o LIB_OBJS += tree.o LIB_OBJS += unpack-trees.o LIB_OBJS += upload-pack.o LIB_OBJS += url.o LIB_OBJS += urlmatch.o LIB_OBJS += usage.o LIB_OBJS += userdiff.o LIB_OBJS += utf8.o LIB_OBJS += varint.o LIB_OBJS += version.o LIB_OBJS += versioncmp.o LIB_OBJS += walker.o LIB_OBJS += wildmatch.o LIB_OBJS += worktree.o LIB_OBJS += wrapper.o LIB_OBJS += write-or-die.o LIB_OBJS += ws.o LIB_OBJS += wt-status.o LIB_OBJS += xdiff-interface.o BUILTIN_OBJS += builtin/add.o BUILTIN_OBJS += builtin/am.o BUILTIN_OBJS += builtin/annotate.o BUILTIN_OBJS += builtin/apply.o BUILTIN_OBJS += builtin/archive.o BUILTIN_OBJS += builtin/backfill.o BUILTIN_OBJS += builtin/bisect.o BUILTIN_OBJS += builtin/blame.o BUILTIN_OBJS += builtin/branch.o BUILTIN_OBJS += builtin/bugreport.o BUILTIN_OBJS += builtin/bundle.o BUILTIN_OBJS += builtin/cat-file.o BUILTIN_OBJS += builtin/check-attr.o BUILTIN_OBJS += builtin/check-ignore.o BUILTIN_OBJS += 
builtin/check-mailmap.o BUILTIN_OBJS += builtin/check-ref-format.o BUILTIN_OBJS += builtin/checkout--worker.o BUILTIN_OBJS += builtin/checkout-index.o BUILTIN_OBJS += builtin/checkout.o BUILTIN_OBJS += builtin/clean.o BUILTIN_OBJS += builtin/clone.o BUILTIN_OBJS += builtin/column.o BUILTIN_OBJS += builtin/commit-graph.o BUILTIN_OBJS += builtin/commit-tree.o BUILTIN_OBJS += builtin/commit.o BUILTIN_OBJS += builtin/config.o BUILTIN_OBJS += builtin/count-objects.o BUILTIN_OBJS += builtin/credential-cache--daemon.o BUILTIN_OBJS += builtin/credential-cache.o BUILTIN_OBJS += builtin/credential-store.o BUILTIN_OBJS += builtin/credential.o BUILTIN_OBJS += builtin/describe.o BUILTIN_OBJS += builtin/diagnose.o BUILTIN_OBJS += builtin/diff-files.o BUILTIN_OBJS += builtin/diff-index.o BUILTIN_OBJS += builtin/diff-tree.o BUILTIN_OBJS += builtin/diff.o BUILTIN_OBJS += builtin/difftool.o BUILTIN_OBJS += builtin/fast-export.o BUILTIN_OBJS += builtin/fast-import.o BUILTIN_OBJS += builtin/fetch-pack.o BUILTIN_OBJS += builtin/fetch.o BUILTIN_OBJS += builtin/fmt-merge-msg.o BUILTIN_OBJS += builtin/for-each-ref.o BUILTIN_OBJS += builtin/for-each-repo.o BUILTIN_OBJS += builtin/fsck.o BUILTIN_OBJS += builtin/fsmonitor--daemon.o BUILTIN_OBJS += builtin/gc.o BUILTIN_OBJS += builtin/get-tar-commit-id.o BUILTIN_OBJS += builtin/grep.o BUILTIN_OBJS += builtin/hash-object.o BUILTIN_OBJS += builtin/help.o BUILTIN_OBJS += builtin/hook.o BUILTIN_OBJS += builtin/index-pack.o BUILTIN_OBJS += builtin/init-db.o BUILTIN_OBJS += builtin/interpret-trailers.o BUILTIN_OBJS += builtin/log.o BUILTIN_OBJS += builtin/ls-files.o BUILTIN_OBJS += builtin/ls-remote.o BUILTIN_OBJS += builtin/ls-tree.o BUILTIN_OBJS += builtin/mailinfo.o BUILTIN_OBJS += builtin/mailsplit.o BUILTIN_OBJS += builtin/merge-base.o BUILTIN_OBJS += builtin/merge-file.o BUILTIN_OBJS += builtin/merge-index.o BUILTIN_OBJS += builtin/merge-ours.o BUILTIN_OBJS += builtin/merge-recursive.o BUILTIN_OBJS += builtin/merge-tree.o BUILTIN_OBJS += 
builtin/merge.o BUILTIN_OBJS += builtin/mktag.o BUILTIN_OBJS += builtin/mktree.o BUILTIN_OBJS += builtin/multi-pack-index.o BUILTIN_OBJS += builtin/mv.o BUILTIN_OBJS += builtin/name-rev.o BUILTIN_OBJS += builtin/notes.o BUILTIN_OBJS += builtin/pack-objects.o BUILTIN_OBJS += builtin/pack-redundant.o BUILTIN_OBJS += builtin/pack-refs.o BUILTIN_OBJS += builtin/patch-id.o BUILTIN_OBJS += builtin/prune-packed.o BUILTIN_OBJS += builtin/prune.o BUILTIN_OBJS += builtin/pull.o BUILTIN_OBJS += builtin/push.o BUILTIN_OBJS += builtin/range-diff.o BUILTIN_OBJS += builtin/read-tree.o BUILTIN_OBJS += builtin/rebase.o BUILTIN_OBJS += builtin/receive-pack.o BUILTIN_OBJS += builtin/reflog.o BUILTIN_OBJS += builtin/refs.o BUILTIN_OBJS += builtin/remote-ext.o BUILTIN_OBJS += builtin/remote-fd.o BUILTIN_OBJS += builtin/remote.o BUILTIN_OBJS += builtin/repack.o BUILTIN_OBJS += builtin/replace.o BUILTIN_OBJS += builtin/replay.o BUILTIN_OBJS += builtin/rerere.o BUILTIN_OBJS += builtin/reset.o BUILTIN_OBJS += builtin/rev-list.o BUILTIN_OBJS += builtin/rev-parse.o BUILTIN_OBJS += builtin/revert.o BUILTIN_OBJS += builtin/rm.o BUILTIN_OBJS += builtin/send-pack.o BUILTIN_OBJS += builtin/shortlog.o BUILTIN_OBJS += builtin/show-branch.o BUILTIN_OBJS += builtin/show-index.o BUILTIN_OBJS += builtin/show-ref.o BUILTIN_OBJS += builtin/sparse-checkout.o BUILTIN_OBJS += builtin/stash.o BUILTIN_OBJS += builtin/stripspace.o BUILTIN_OBJS += builtin/submodule--helper.o BUILTIN_OBJS += builtin/survey.o BUILTIN_OBJS += builtin/symbolic-ref.o BUILTIN_OBJS += builtin/tag.o BUILTIN_OBJS += builtin/unpack-file.o BUILTIN_OBJS += builtin/unpack-objects.o BUILTIN_OBJS += builtin/update-index.o BUILTIN_OBJS += builtin/update-ref.o BUILTIN_OBJS += builtin/update-server-info.o BUILTIN_OBJS += builtin/upload-archive.o BUILTIN_OBJS += builtin/upload-pack.o BUILTIN_OBJS += builtin/var.o BUILTIN_OBJS += builtin/verify-commit.o BUILTIN_OBJS += builtin/verify-pack.o BUILTIN_OBJS += builtin/verify-tag.o BUILTIN_OBJS += 
builtin/worktree.o BUILTIN_OBJS += builtin/write-tree.o # THIRD_PARTY_SOURCES is a list of patterns compatible with the # $(filter) and $(filter-out) family of functions. They specify source # files which are taken from some third-party source where we want to be # less strict about issues such as coding style so we don't diverge from # upstream unnecessarily (making merging in future changes easier). THIRD_PARTY_SOURCES += compat/inet_ntop.c THIRD_PARTY_SOURCES += compat/inet_pton.c THIRD_PARTY_SOURCES += compat/mimalloc/% THIRD_PARTY_SOURCES += compat/nedmalloc/% THIRD_PARTY_SOURCES += compat/obstack.% THIRD_PARTY_SOURCES += compat/poll/% THIRD_PARTY_SOURCES += compat/regex/% THIRD_PARTY_SOURCES += sha1collisiondetection/% THIRD_PARTY_SOURCES += sha1dc/% THIRD_PARTY_SOURCES += $(UNIT_TEST_DIR)/clar/% THIRD_PARTY_SOURCES += $(UNIT_TEST_DIR)/clar/clar/% CLAR_TEST_SUITES += u-ctype CLAR_TEST_SUITES += u-strvec CLAR_TEST_SUITES += u-mingw CLAR_TEST_PROG = $(UNIT_TEST_BIN)/unit-tests$(X) CLAR_TEST_OBJS = $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(CLAR_TEST_SUITES)) CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/clar/clar.o CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/unit-test.o UNIT_TEST_PROGRAMS += t-example-decorate UNIT_TEST_PROGRAMS += t-hash UNIT_TEST_PROGRAMS += t-hashmap UNIT_TEST_PROGRAMS += t-mem-pool UNIT_TEST_PROGRAMS += t-oid-array UNIT_TEST_PROGRAMS += t-oidmap UNIT_TEST_PROGRAMS += t-oidtree UNIT_TEST_PROGRAMS += t-prio-queue UNIT_TEST_PROGRAMS += t-reftable-basics UNIT_TEST_PROGRAMS += t-reftable-block UNIT_TEST_PROGRAMS += t-reftable-merged UNIT_TEST_PROGRAMS += t-reftable-pq UNIT_TEST_PROGRAMS += t-reftable-reader UNIT_TEST_PROGRAMS += t-reftable-readwrite UNIT_TEST_PROGRAMS += t-reftable-record UNIT_TEST_PROGRAMS += t-reftable-stack UNIT_TEST_PROGRAMS += t-reftable-tree UNIT_TEST_PROGRAMS += t-strbuf UNIT_TEST_PROGRAMS += t-strcmp-offset UNIT_TEST_PROGRAMS += t-trailer UNIT_TEST_PROGRAMS += t-urlmatch-normalization UNIT_TEST_PROGS = $(patsubst 
%,$(UNIT_TEST_BIN)/%$X,$(UNIT_TEST_PROGRAMS)) UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/test-lib.o UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/lib-oid.o UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/lib-reftable.o # xdiff and reftable libs may in turn depend on what is in libgit.a GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE) EXTLIBS = GIT_USER_AGENT = git/$(GIT_VERSION) ifeq ($(wildcard sha1collisiondetection/lib/sha1.h),sha1collisiondetection/lib/sha1.h) DC_SHA1_SUBMODULE = auto endif # Set CFLAGS, LDFLAGS and other *FLAGS variables. These might be # tweaked by config.* below as well as the command-line, both of # which'll override these defaults. # Older versions of GCC may require adding "-std=gnu99" at the end. CFLAGS = -g -O2 -Wall LDFLAGS = CC_LD_DYNPATH = -Wl,-rpath, BASIC_CFLAGS = -I. BASIC_LDFLAGS = # library flags ARFLAGS = rcs PTHREAD_CFLAGS = # For the 'sparse' target SPARSE_FLAGS ?= -std=gnu99 SP_EXTRA_FLAGS = # For informing GIT-BUILD-OPTIONS of the SANITIZE=leak,address targets SANITIZE_LEAK = SANITIZE_ADDRESS = # For the 'coccicheck' target SPATCH_INCLUDE_FLAGS = --all-includes SPATCH_FLAGS = SPATCH_TEST_FLAGS = # If *.o files are present, have "coccicheck" depend on them, with # COMPUTE_HEADER_DEPENDENCIES this will speed up the common-case of # only needing to re-generate coccicheck results for the users of a # given API if it's changed, and not all files in the project. If # COMPUTE_HEADER_DEPENDENCIES=no this will be unset too. SPATCH_USE_O_DEPENDENCIES = YesPlease # Set SPATCH_CONCAT_COCCI to concatenate the contrib/cocci/*.cocci # files into a single contrib/cocci/ALL.cocci before running # "coccicheck". # # Pros: # # - Speeds up a one-shot run of "make coccicheck", as we won't have to # parse *.[ch] files N times for the N *.cocci rules # # Cons: # # - Will make incremental development of *.cocci slower, as # e.g. changing strbuf.cocci will re-run all *.cocci. 
# # - Makes error and performance analysis harder, as rules will be # applied from a monolithic ALL.cocci, rather than # e.g. strbuf.cocci. To work around this either undefine this, or # generate a specific patch, e.g. this will always use strbuf.cocci, # not ALL.cocci: # # make contrib/coccinelle/strbuf.cocci.patch SPATCH_CONCAT_COCCI = YesPlease # Rebuild 'coccicheck' if $(SPATCH), its flags etc. change TRACK_SPATCH_DEFINES = TRACK_SPATCH_DEFINES += $(SPATCH) TRACK_SPATCH_DEFINES += $(SPATCH_INCLUDE_FLAGS) TRACK_SPATCH_DEFINES += $(SPATCH_FLAGS) TRACK_SPATCH_DEFINES += $(SPATCH_TEST_FLAGS) GIT-SPATCH-DEFINES: FORCE @FLAGS='$(TRACK_SPATCH_DEFINES)'; \ if test x"$$FLAGS" != x"`cat GIT-SPATCH-DEFINES 2>/dev/null`" ; then \ echo >&2 " * new spatch flags"; \ echo "$$FLAGS" >GIT-SPATCH-DEFINES; \ fi include config.mak.uname -include config.mak.autogen -include config.mak ifdef DEVELOPER include config.mak.dev endif GIT-VERSION-FILE: FORCE @OLD=$$(cat $@ 2>/dev/null || :) && \ $(call version_gen,"$(shell pwd)",GIT-VERSION-FILE.in,$@) && \ NEW=$$(cat $@ 2>/dev/null || :) && \ if test "$$OLD" != "$$NEW"; then echo "$$NEW" >&2; fi # We need to set GIT_VERSION_OVERRIDE before including the version file as # otherwise any user-provided value for GIT_VERSION would have been overridden # already. GIT_VERSION_OVERRIDE := $(GIT_VERSION) -include GIT-VERSION-FILE # what 'all' will build and 'install' will install in gitexecdir, # excluding programs for built-in commands ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) ALL_COMMANDS_TO_INSTALL = $(ALL_PROGRAMS) ifeq (,$(SKIP_DASHED_BUILT_INS)) ALL_COMMANDS_TO_INSTALL += $(BUILT_INS) else # git-upload-pack, git-receive-pack and git-upload-archive are special: they # are _expected_ to be present in the `bin/` directory in their dashed form. 
ALL_COMMANDS_TO_INSTALL += git-receive-pack$(X) ALL_COMMANDS_TO_INSTALL += git-upload-archive$(X) ALL_COMMANDS_TO_INSTALL += git-upload-pack$(X) endif ALL_CFLAGS = $(DEVELOPER_CFLAGS) $(CPPFLAGS) $(CFLAGS) $(CFLAGS_APPEND) ALL_LDFLAGS = $(LDFLAGS) $(LDFLAGS_APPEND) ifdef SANITIZE SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag)) BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE) BASIC_CFLAGS += -fno-omit-frame-pointer ifneq ($(filter undefined,$(SANITIZERS)),) BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS endif ifneq ($(filter leak,$(SANITIZERS)),) BASIC_CFLAGS += -O0 SANITIZE_LEAK = YesCompiledWithIt endif ifneq ($(filter address,$(SANITIZERS)),) NO_REGEX = NeededForASAN SANITIZE_ADDRESS = YesCompiledWithIt endif endif ifndef sysconfdir ifeq ($(prefix),/usr) sysconfdir = /etc else sysconfdir = etc endif endif ifndef COMPUTE_HEADER_DEPENDENCIES COMPUTE_HEADER_DEPENDENCIES = auto endif ifeq ($(COMPUTE_HEADER_DEPENDENCIES),auto) dep_check = $(shell $(CC) $(ALL_CFLAGS) \ -Wno-pedantic \ -c -MF /dev/null -MQ /dev/null -MMD -MP \ -x c /dev/null -o /dev/null 2>&1; \ echo $$?) ifeq ($(dep_check),0) override COMPUTE_HEADER_DEPENDENCIES = yes else override COMPUTE_HEADER_DEPENDENCIES = no endif endif ifeq ($(COMPUTE_HEADER_DEPENDENCIES),yes) USE_COMPUTED_HEADER_DEPENDENCIES = YesPlease else ifneq ($(COMPUTE_HEADER_DEPENDENCIES),no) $(error please set COMPUTE_HEADER_DEPENDENCIES to yes, no, or auto \ (not "$(COMPUTE_HEADER_DEPENDENCIES)")) endif endif ifndef GENERATE_COMPILATION_DATABASE GENERATE_COMPILATION_DATABASE = no endif ifeq ($(GENERATE_COMPILATION_DATABASE),yes) compdb_check = $(shell $(CC) $(ALL_CFLAGS) \ -Wno-pedantic \ -c -MJ /dev/null \ -x c /dev/null -o /dev/null 2>&1; \ echo $$?) 
ifneq ($(compdb_check),0) override GENERATE_COMPILATION_DATABASE = no $(warning GENERATE_COMPILATION_DATABASE is set to "yes", but your compiler does not \ support generating compilation database entries) endif else ifneq ($(GENERATE_COMPILATION_DATABASE),no) $(error please set GENERATE_COMPILATION_DATABASE to "yes" or "no" \ (not "$(GENERATE_COMPILATION_DATABASE)")) endif endif ifdef SANE_TOOL_PATH SANE_TOOL_PATH_SQ = $(subst ','\'',$(SANE_TOOL_PATH)) BROKEN_PATH_FIX = s|^\# @BROKEN_PATH_FIX@$$|git_broken_path_fix "$(SANE_TOOL_PATH_SQ)"| PATH := $(SANE_TOOL_PATH):${PATH} else BROKEN_PATH_FIX = /^\# @BROKEN_PATH_FIX@$$/d endif ifeq (,$(HOST_CPU)) BASIC_CFLAGS += -DGIT_HOST_CPU="\"$(firstword $(subst -, ,$(uname_M)))\"" else BASIC_CFLAGS += -DGIT_HOST_CPU="\"$(HOST_CPU)\"" endif ifneq (,$(INLINE)) BASIC_CFLAGS += -Dinline=$(INLINE) endif ifneq (,$(SOCKLEN_T)) BASIC_CFLAGS += -Dsocklen_t=$(SOCKLEN_T) endif ifeq ($(uname_S),Darwin) ifndef NO_FINK ifeq ($(shell test -d /sw/lib && echo y),y) BASIC_CFLAGS += -I/sw/include BASIC_LDFLAGS += -L/sw/lib endif endif ifndef NO_DARWIN_PORTS ifeq ($(shell test -d /opt/local/lib && echo y),y) BASIC_CFLAGS += -I/opt/local/include BASIC_LDFLAGS += -L/opt/local/lib endif endif ifndef NO_APPLE_COMMON_CRYPTO NO_OPENSSL = YesPlease APPLE_COMMON_CRYPTO = YesPlease COMPAT_CFLAGS += -DAPPLE_COMMON_CRYPTO endif PTHREAD_LIBS = endif ifdef NO_LIBGEN_H COMPAT_CFLAGS += -DNO_LIBGEN_H COMPAT_OBJS += compat/basename.o endif ifdef USE_LIBPCRE1 $(error The USE_LIBPCRE1 build option has been removed, use version 2 with USE_LIBPCRE) endif USE_LIBPCRE2 ?= $(USE_LIBPCRE) ifneq (,$(USE_LIBPCRE2)) BASIC_CFLAGS += -DUSE_LIBPCRE2 EXTLIBS += -lpcre2-8 endif ifdef LIBPCREDIR BASIC_CFLAGS += -I$(LIBPCREDIR)/include EXTLIBS += $(call libpath_template,$(LIBPCREDIR)/$(lib)) endif ifdef HAVE_ALLOCA_H BASIC_CFLAGS += -DHAVE_ALLOCA_H endif IMAP_SEND_BUILDDEPS = IMAP_SEND_LDFLAGS = ifdef NO_CURL BASIC_CFLAGS += -DNO_CURL REMOTE_CURL_PRIMARY = REMOTE_CURL_ALIASES = 
REMOTE_CURL_NAMES = EXCLUDED_PROGRAMS += git-http-fetch git-http-push else ifdef CURLDIR # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case. CURL_CFLAGS = -I$(CURLDIR)/include CURL_LIBCURL = $(call libpath_template,$(CURLDIR)/$(lib)) else CURL_CFLAGS = CURL_LIBCURL = endif ifdef LAZYLOAD_LIBCURL LAZYLOAD_LIBCURL_OBJ = compat/lazyload-curl.o OBJECTS += $(LAZYLOAD_LIBCURL_OBJ) # The `CURL_STATICLIB` constant must be defined to avoid seeing the functions # declared as DLL imports CURL_CFLAGS = -DCURL_STATICLIB ifneq ($(uname_S),MINGW) ifneq ($(uname_S),Windows) CURL_LIBCURL = -ldl endif endif else ifndef CURL_LDFLAGS CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS) endif CURL_LIBCURL += $(CURL_LDFLAGS) endif ifndef CURL_CFLAGS CURL_CFLAGS = $(eval CURL_CFLAGS := $$(shell $$(CURL_CONFIG) --cflags))$(CURL_CFLAGS) endif BASIC_CFLAGS += $(CURL_CFLAGS) REMOTE_CURL_PRIMARY = git-remote-http$X REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES) PROGRAM_OBJS += http-fetch.o PROGRAMS += $(REMOTE_CURL_NAMES) ifndef NO_EXPAT PROGRAM_OBJS += http-push.o endif curl_check := $(shell (echo 072200; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p) ifeq "$(curl_check)" "072200" USE_CURL_FOR_IMAP_SEND = YesPlease endif ifdef USE_CURL_FOR_IMAP_SEND BASIC_CFLAGS += -DUSE_CURL_FOR_IMAP_SEND IMAP_SEND_BUILDDEPS = http.o $(LAZYLOAD_LIBCURL_OBJ) IMAP_SEND_LDFLAGS += $(CURL_LIBCURL) endif ifndef NO_EXPAT ifdef EXPATDIR BASIC_CFLAGS += -I$(EXPATDIR)/include EXPAT_LIBEXPAT = $(call libpath_template,$(EXPATDIR)/$(lib)) -lexpat else EXPAT_LIBEXPAT = -lexpat endif ifdef EXPAT_NEEDS_XMLPARSE_H BASIC_CFLAGS += -DEXPAT_NEEDS_XMLPARSE_H endif endif endif IMAP_SEND_LDFLAGS += $(OPENSSL_LINK) $(OPENSSL_LIBSSL) $(LIB_4_CRYPTO) ifdef ZLIB_PATH BASIC_CFLAGS += -I$(ZLIB_PATH)/include EXTLIBS += $(call libpath_template,$(ZLIB_PATH)/$(lib)) 
endif EXTLIBS += -lz ifndef NO_OPENSSL OPENSSL_LIBSSL = -lssl ifdef OPENSSLDIR BASIC_CFLAGS += -I$(OPENSSLDIR)/include OPENSSL_LINK = $(call libpath_template,$(OPENSSLDIR)/$(lib)) else OPENSSL_LINK = endif ifdef NEEDS_CRYPTO_WITH_SSL OPENSSL_LIBSSL += -lcrypto endif else BASIC_CFLAGS += -DNO_OPENSSL OPENSSL_LIBSSL = endif ifdef NO_OPENSSL LIB_4_CRYPTO = else ifdef NEEDS_SSL_WITH_CRYPTO LIB_4_CRYPTO = $(OPENSSL_LINK) -lcrypto -lssl else LIB_4_CRYPTO = $(OPENSSL_LINK) -lcrypto endif ifdef APPLE_COMMON_CRYPTO LIB_4_CRYPTO += -framework Security -framework CoreFoundation endif endif ifndef NO_ICONV ifdef NEEDS_LIBICONV ifdef ICONVDIR BASIC_CFLAGS += -I$(ICONVDIR)/include ICONV_LINK = $(call libpath_template,$(ICONVDIR)/$(lib)) else ICONV_LINK = endif ifdef NEEDS_LIBINTL_BEFORE_LIBICONV ICONV_LINK += -lintl endif EXTLIBS += $(ICONV_LINK) -liconv endif endif ifdef ICONV_OMITS_BOM BASIC_CFLAGS += -DICONV_OMITS_BOM endif ifdef NEEDS_LIBGEN EXTLIBS += -lgen endif ifndef NO_GETTEXT ifndef LIBC_CONTAINS_LIBINTL EXTLIBS += -lintl endif endif ifdef NEEDS_SOCKET EXTLIBS += -lsocket endif ifdef NEEDS_NSL EXTLIBS += -lnsl endif ifdef NEEDS_RESOLV EXTLIBS += -lresolv endif ifdef NO_D_TYPE_IN_DIRENT BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT endif ifdef NO_GECOS_IN_PWENT BASIC_CFLAGS += -DNO_GECOS_IN_PWENT endif ifdef NO_ST_BLOCKS_IN_STRUCT_STAT BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT endif ifdef USE_NSEC BASIC_CFLAGS += -DUSE_NSEC endif ifdef USE_ST_TIMESPEC BASIC_CFLAGS += -DUSE_ST_TIMESPEC endif ifdef NO_NORETURN BASIC_CFLAGS += -DNO_NORETURN endif ifdef NO_NSEC BASIC_CFLAGS += -DNO_NSEC endif ifdef SNPRINTF_RETURNS_BOGUS COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS COMPAT_OBJS += compat/snprintf.o endif ifdef FREAD_READS_DIRECTORIES COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES COMPAT_OBJS += compat/fopen.o endif ifdef OPEN_RETURNS_EINTR COMPAT_CFLAGS += -DOPEN_RETURNS_EINTR COMPAT_OBJS += compat/open.o endif ifdef NO_SYMLINK_HEAD BASIC_CFLAGS += -DNO_SYMLINK_HEAD endif ifdef 
NO_GETTEXT BASIC_CFLAGS += -DNO_GETTEXT USE_GETTEXT_SCHEME ?= fallthrough endif ifdef NO_POLL NO_POLL_H = YesPlease NO_SYS_POLL_H = YesPlease COMPAT_CFLAGS += -DNO_POLL -Icompat/poll COMPAT_OBJS += compat/poll/poll.o endif ifdef NO_STRCASESTR COMPAT_CFLAGS += -DNO_STRCASESTR COMPAT_OBJS += compat/strcasestr.o endif ifdef NO_STRLCPY COMPAT_CFLAGS += -DNO_STRLCPY COMPAT_OBJS += compat/strlcpy.o endif ifdef NO_STRTOUMAX COMPAT_CFLAGS += -DNO_STRTOUMAX COMPAT_OBJS += compat/strtoumax.o compat/strtoimax.o endif ifdef NO_STRTOULL COMPAT_CFLAGS += -DNO_STRTOULL endif ifdef NO_SETENV COMPAT_CFLAGS += -DNO_SETENV COMPAT_OBJS += compat/setenv.o endif ifdef NO_MKDTEMP COMPAT_CFLAGS += -DNO_MKDTEMP COMPAT_OBJS += compat/mkdtemp.o endif ifdef MKDIR_WO_TRAILING_SLASH COMPAT_CFLAGS += -DMKDIR_WO_TRAILING_SLASH COMPAT_OBJS += compat/mkdir.o endif ifdef NO_UNSETENV COMPAT_CFLAGS += -DNO_UNSETENV COMPAT_OBJS += compat/unsetenv.o endif ifdef NO_SYS_SELECT_H BASIC_CFLAGS += -DNO_SYS_SELECT_H endif ifdef NO_POLL_H BASIC_CFLAGS += -DNO_POLL_H endif ifdef NO_SYS_POLL_H BASIC_CFLAGS += -DNO_SYS_POLL_H endif ifdef NEEDS_SYS_PARAM_H BASIC_CFLAGS += -DNEEDS_SYS_PARAM_H endif ifdef NO_INTTYPES_H BASIC_CFLAGS += -DNO_INTTYPES_H endif ifdef NO_INITGROUPS BASIC_CFLAGS += -DNO_INITGROUPS endif ifdef NO_MMAP COMPAT_CFLAGS += -DNO_MMAP COMPAT_OBJS += compat/mmap.o else ifdef USE_WIN32_MMAP COMPAT_CFLAGS += -DUSE_WIN32_MMAP COMPAT_OBJS += compat/win32mmap.o endif endif ifdef MMAP_PREVENTS_DELETE BASIC_CFLAGS += -DMMAP_PREVENTS_DELETE endif ifdef OBJECT_CREATION_USES_RENAMES COMPAT_CFLAGS += -DOBJECT_CREATION_MODE=1 endif ifdef NO_STRUCT_ITIMERVAL COMPAT_CFLAGS += -DNO_STRUCT_ITIMERVAL NO_SETITIMER = YesPlease endif ifdef NO_SETITIMER COMPAT_CFLAGS += -DNO_SETITIMER endif ifdef NO_PREAD COMPAT_CFLAGS += -DNO_PREAD COMPAT_OBJS += compat/pread.o endif ifdef NO_FAST_WORKING_DIRECTORY BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY endif ifdef NO_TRUSTABLE_FILEMODE BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE 
endif ifdef NEEDS_MODE_TRANSLATION COMPAT_CFLAGS += -DNEEDS_MODE_TRANSLATION COMPAT_OBJS += compat/stat.o endif ifdef NO_IPV6 BASIC_CFLAGS += -DNO_IPV6 endif ifdef NO_INTPTR_T COMPAT_CFLAGS += -DNO_INTPTR_T endif ifdef NO_UINTMAX_T BASIC_CFLAGS += -Duintmax_t=uint32_t endif ifdef NO_SOCKADDR_STORAGE ifdef NO_IPV6 BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in else BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6 endif endif ifdef NO_INET_NTOP LIB_OBJS += compat/inet_ntop.o BASIC_CFLAGS += -DNO_INET_NTOP endif ifdef NO_INET_PTON LIB_OBJS += compat/inet_pton.o BASIC_CFLAGS += -DNO_INET_PTON endif ifdef NO_UNIX_SOCKETS BASIC_CFLAGS += -DNO_UNIX_SOCKETS else LIB_OBJS += unix-socket.o LIB_OBJS += unix-stream-server.o endif # Simple IPC requires threads and platform-specific IPC support. # Only platforms that have both should include these source files # in the build. # # On Windows-based systems, Simple IPC requires threads and Windows # Named Pipes. These are always available, so Simple IPC support # is optional. # # On Unix-based systems, Simple IPC requires pthreads and Unix # domain sockets. So support is only enabled when both are present. # ifdef USE_WIN32_IPC BASIC_CFLAGS += -DSUPPORTS_SIMPLE_IPC LIB_OBJS += compat/simple-ipc/ipc-shared.o LIB_OBJS += compat/simple-ipc/ipc-win32.o else ifndef NO_PTHREADS ifndef NO_UNIX_SOCKETS BASIC_CFLAGS += -DSUPPORTS_SIMPLE_IPC LIB_OBJS += compat/simple-ipc/ipc-shared.o LIB_OBJS += compat/simple-ipc/ipc-unix-socket.o endif endif endif ifdef NO_ICONV BASIC_CFLAGS += -DNO_ICONV endif ifdef OLD_ICONV BASIC_CFLAGS += -DOLD_ICONV endif ifdef NO_DEFLATE_BOUND BASIC_CFLAGS += -DNO_DEFLATE_BOUND endif ifdef NO_POSIX_GOODIES BASIC_CFLAGS += -DNO_POSIX_GOODIES endif ifdef APPLE_COMMON_CRYPTO_SHA1 # Apple CommonCrypto requires chunking SHA1_MAX_BLOCK_SIZE = 1024L*1024L*1024L endif ifdef PPC_SHA1 $(error the PPC_SHA1 flag has been removed along with the PowerPC-specific SHA-1 implementation.) 
endif ifdef OPENSSL_SHA1 EXTLIBS += $(LIB_4_CRYPTO) BASIC_CFLAGS += -DSHA1_OPENSSL else ifdef BLK_SHA1 LIB_OBJS += block-sha1/sha1.o BASIC_CFLAGS += -DSHA1_BLK else ifdef APPLE_COMMON_CRYPTO_SHA1 COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL BASIC_CFLAGS += -DSHA1_APPLE else BASIC_CFLAGS += -DSHA1_DC LIB_OBJS += sha1dc_git.o ifdef DC_SHA1_EXTERNAL ifdef DC_SHA1_SUBMODULE ifneq ($(DC_SHA1_SUBMODULE),auto) $(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both) endif endif BASIC_CFLAGS += -DDC_SHA1_EXTERNAL EXTLIBS += -lsha1detectcoll else ifdef DC_SHA1_SUBMODULE LIB_OBJS += sha1collisiondetection/lib/sha1.o LIB_OBJS += sha1collisiondetection/lib/ubc_check.o BASIC_CFLAGS += -DDC_SHA1_SUBMODULE else LIB_OBJS += sha1dc/sha1.o LIB_OBJS += sha1dc/ubc_check.o endif BASIC_CFLAGS += \ -DSHA1DC_NO_STANDARD_INCLUDES \ -DSHA1DC_INIT_SAFE_HASH_DEFAULT=0 \ -DSHA1DC_CUSTOM_INCLUDE_SHA1_C="\"git-compat-util.h\"" \ -DSHA1DC_CUSTOM_INCLUDE_UBC_CHECK_C="\"git-compat-util.h\"" endif endif endif endif ifdef OPENSSL_SHA1_UNSAFE ifndef OPENSSL_SHA1 EXTLIBS += $(LIB_4_CRYPTO) BASIC_CFLAGS += -DSHA1_OPENSSL_UNSAFE endif else ifdef BLK_SHA1_UNSAFE ifndef BLK_SHA1 LIB_OBJS += block-sha1/sha1.o BASIC_CFLAGS += -DSHA1_BLK_UNSAFE endif else ifdef APPLE_COMMON_CRYPTO_SHA1_UNSAFE ifndef APPLE_COMMON_CRYPTO_SHA1 COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL BASIC_CFLAGS += -DSHA1_APPLE_UNSAFE endif endif endif endif ifdef OPENSSL_SHA256 EXTLIBS += $(LIB_4_CRYPTO) BASIC_CFLAGS += -DSHA256_OPENSSL else ifdef NETTLE_SHA256 BASIC_CFLAGS += -DSHA256_NETTLE EXTLIBS += -lnettle else ifdef GCRYPT_SHA256 BASIC_CFLAGS += -DSHA256_GCRYPT EXTLIBS += -lgcrypt else LIB_OBJS += sha256/block/sha256.o BASIC_CFLAGS += -DSHA256_BLK endif endif endif ifdef SHA1_MAX_BLOCK_SIZE LIB_OBJS += compat/sha1-chunked.o BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)" endif ifdef NO_HSTRERROR COMPAT_CFLAGS += -DNO_HSTRERROR COMPAT_OBJS += compat/hstrerror.o endif ifdef NO_MEMMEM COMPAT_CFLAGS += 
-DNO_MEMMEM COMPAT_OBJS += compat/memmem.o endif ifdef NO_GETPAGESIZE COMPAT_CFLAGS += -DNO_GETPAGESIZE endif ifdef INTERNAL_QSORT COMPAT_CFLAGS += -DINTERNAL_QSORT endif ifdef HAVE_ISO_QSORT_S COMPAT_CFLAGS += -DHAVE_ISO_QSORT_S else COMPAT_OBJS += compat/qsort_s.o endif ifdef RUNTIME_PREFIX COMPAT_CFLAGS += -DRUNTIME_PREFIX endif ifdef NO_PTHREADS BASIC_CFLAGS += -DNO_PTHREADS else BASIC_CFLAGS += $(PTHREAD_CFLAGS) EXTLIBS += $(PTHREAD_LIBS) endif ifdef HAVE_PATHS_H BASIC_CFLAGS += -DHAVE_PATHS_H endif ifdef HAVE_LIBCHARSET_H BASIC_CFLAGS += -DHAVE_LIBCHARSET_H EXTLIBS += $(CHARSET_LIB) endif ifdef HAVE_STRINGS_H BASIC_CFLAGS += -DHAVE_STRINGS_H endif ifdef HAVE_DEV_TTY BASIC_CFLAGS += -DHAVE_DEV_TTY endif ifdef DIR_HAS_BSD_GROUP_SEMANTICS COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS endif ifdef UNRELIABLE_FSTAT BASIC_CFLAGS += -DUNRELIABLE_FSTAT endif ifdef NO_REGEX COMPAT_CFLAGS += -Icompat/regex COMPAT_OBJS += compat/regex/regex.o else ifdef USE_ENHANCED_BASIC_REGULAR_EXPRESSIONS COMPAT_CFLAGS += -DUSE_ENHANCED_BASIC_REGULAR_EXPRESSIONS COMPAT_OBJS += compat/regcomp_enhanced.o endif endif ifdef NATIVE_CRLF BASIC_CFLAGS += -DNATIVE_CRLF endif ifdef USE_NED_ALLOCATOR COMPAT_CFLAGS += -Icompat/nedmalloc COMPAT_OBJS += compat/nedmalloc/nedmalloc.o OVERRIDE_STRDUP = YesPlease endif ifdef USE_MIMALLOC MIMALLOC_OBJS = \ compat/mimalloc/alloc-aligned.o \ compat/mimalloc/alloc.o \ compat/mimalloc/arena.o \ compat/mimalloc/bitmap.o \ compat/mimalloc/heap.o \ compat/mimalloc/init.o \ compat/mimalloc/options.o \ compat/mimalloc/os.o \ compat/mimalloc/page.o \ compat/mimalloc/random.o \ compat/mimalloc/prim/windows/prim.o \ compat/mimalloc/segment.o \ compat/mimalloc/segment-cache.o \ compat/mimalloc/segment-map.o \ compat/mimalloc/stats.o COMPAT_CFLAGS += -Icompat/mimalloc -DMI_DEBUG=0 -DUSE_MIMALLOC --std=gnu11 COMPAT_OBJS += $(MIMALLOC_OBJS) $(MIMALLOC_OBJS): COMPAT_CFLAGS += -DBANNED_H $(MIMALLOC_OBJS): COMPAT_CFLAGS += \ -Wno-attributes \ -Wno-unknown-pragmas \ 
-Wno-array-bounds ifdef DEVELOPER $(MIMALLOC_OBJS): COMPAT_CFLAGS += \ -Wno-pedantic \ -Wno-declaration-after-statement \ -Wno-old-style-definition \ -Wno-missing-prototypes endif endif ifdef OVERRIDE_STRDUP COMPAT_CFLAGS += -DOVERRIDE_STRDUP COMPAT_OBJS += compat/strdup.o endif ifdef GIT_TEST_CMP_USE_COPIED_CONTEXT export GIT_TEST_CMP_USE_COPIED_CONTEXT endif ifndef NO_MSGFMT_EXTENDED_OPTIONS MSGFMT += --check endif ifdef HAVE_CLOCK_GETTIME BASIC_CFLAGS += -DHAVE_CLOCK_GETTIME endif ifdef HAVE_CLOCK_MONOTONIC BASIC_CFLAGS += -DHAVE_CLOCK_MONOTONIC endif ifdef HAVE_SYNC_FILE_RANGE BASIC_CFLAGS += -DHAVE_SYNC_FILE_RANGE endif ifdef NEEDS_LIBRT EXTLIBS += -lrt endif ifdef HAVE_BSD_SYSCTL BASIC_CFLAGS += -DHAVE_BSD_SYSCTL endif ifdef HAVE_BSD_KERN_PROC_SYSCTL BASIC_CFLAGS += -DHAVE_BSD_KERN_PROC_SYSCTL endif ifdef HAVE_GETDELIM BASIC_CFLAGS += -DHAVE_GETDELIM endif ifneq ($(findstring arc4random,$(CSPRNG_METHOD)),) BASIC_CFLAGS += -DHAVE_ARC4RANDOM endif ifneq ($(findstring libbsd,$(CSPRNG_METHOD)),) BASIC_CFLAGS += -DHAVE_ARC4RANDOM_LIBBSD EXTLIBS += -lbsd endif ifneq ($(findstring getrandom,$(CSPRNG_METHOD)),) BASIC_CFLAGS += -DHAVE_GETRANDOM endif ifneq ($(findstring getentropy,$(CSPRNG_METHOD)),) BASIC_CFLAGS += -DHAVE_GETENTROPY endif ifneq ($(findstring rtlgenrandom,$(CSPRNG_METHOD)),) BASIC_CFLAGS += -DHAVE_RTLGENRANDOM endif ifneq ($(findstring openssl,$(CSPRNG_METHOD)),) BASIC_CFLAGS += -DHAVE_OPENSSL_CSPRNG EXTLIBS += -lcrypto -lssl endif ifneq ($(PROCFS_EXECUTABLE_PATH),) procfs_executable_path_SQ = $(subst ','\'',$(PROCFS_EXECUTABLE_PATH)) BASIC_CFLAGS += '-DPROCFS_EXECUTABLE_PATH="$(procfs_executable_path_SQ)"' endif ifndef HAVE_PLATFORM_PROCINFO COMPAT_OBJS += compat/stub/procinfo.o endif ifdef HAVE_NS_GET_EXECUTABLE_PATH BASIC_CFLAGS += -DHAVE_NS_GET_EXECUTABLE_PATH endif ifdef HAVE_ZOS_GET_EXECUTABLE_PATH BASIC_CFLAGS += -DHAVE_ZOS_GET_EXECUTABLE_PATH endif ifdef HAVE_WPGMPTR BASIC_CFLAGS += -DHAVE_WPGMPTR endif ifdef FILENO_IS_A_MACRO COMPAT_CFLAGS += 
-DFILENO_IS_A_MACRO COMPAT_OBJS += compat/fileno.o endif ifdef NEED_ACCESS_ROOT_HANDLER COMPAT_CFLAGS += -DNEED_ACCESS_ROOT_HANDLER COMPAT_OBJS += compat/access.o endif ifdef FSMONITOR_DAEMON_BACKEND COMPAT_CFLAGS += -DHAVE_FSMONITOR_DAEMON_BACKEND COMPAT_OBJS += compat/fsmonitor/fsm-listen-$(FSMONITOR_DAEMON_BACKEND).o COMPAT_OBJS += compat/fsmonitor/fsm-health-$(FSMONITOR_DAEMON_BACKEND).o COMPAT_OBJS += compat/fsmonitor/fsm-ipc-$(FSMONITOR_DAEMON_BACKEND).o endif ifdef FSMONITOR_OS_SETTINGS COMPAT_CFLAGS += -DHAVE_FSMONITOR_OS_SETTINGS COMPAT_OBJS += compat/fsmonitor/fsm-settings-$(FSMONITOR_OS_SETTINGS).o COMPAT_OBJS += compat/fsmonitor/fsm-path-utils-$(FSMONITOR_OS_SETTINGS).o endif ifeq ($(TCLTK_PATH),) NO_TCLTK = NoThanks endif ifeq ($(PERL_PATH),) NO_PERL = NoThanks endif ifeq ($(PYTHON_PATH),) NO_PYTHON = NoThanks endif ifndef PAGER_ENV PAGER_ENV = LESS=FRX LV=-c endif ifdef NO_INSTALL_HARDLINKS export NO_INSTALL_HARDLINKS endif ### profile feedback build # # Can adjust this to be a global directory if you want to do extended # data gathering PROFILE_DIR := $(CURDIR) ifeq ("$(PROFILE)","GEN") BASIC_CFLAGS += -fprofile-generate=$(PROFILE_DIR) -DNO_NORETURN=1 EXTLIBS += -lgcov export CCACHE_DISABLE = t V = 1 else ifneq ("$(PROFILE)","") BASIC_CFLAGS += -fprofile-use=$(PROFILE_DIR) -fprofile-correction -DNO_NORETURN=1 export CCACHE_DISABLE = t V = 1 endif endif # Shell quote (do not use $(call) to accommodate ancient setups); ETC_GITCONFIG_SQ = $(subst ','\'',$(ETC_GITCONFIG)) ETC_GITATTRIBUTES_SQ = $(subst ','\'',$(ETC_GITATTRIBUTES)) DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) NO_GETTEXT_SQ = $(subst ','\'',$(NO_GETTEXT)) bindir_SQ = $(subst ','\'',$(bindir)) bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) mandir_SQ = $(subst ','\'',$(mandir)) mandir_relative_SQ = $(subst ','\'',$(mandir_relative)) infodir_relative_SQ = $(subst ','\'',$(infodir_relative)) perllibdir_SQ = $(subst ','\'',$(perllibdir)) localedir_SQ = $(subst ','\'',$(localedir)) 
localedir_relative_SQ = $(subst ','\'',$(localedir_relative)) gitexecdir_SQ = $(subst ','\'',$(gitexecdir)) gitexecdir_relative_SQ = $(subst ','\'',$(gitexecdir_relative)) template_dir_SQ = $(subst ','\'',$(template_dir)) htmldir_relative_SQ = $(subst ','\'',$(htmldir_relative)) prefix_SQ = $(subst ','\'',$(prefix)) perllibdir_relative_SQ = $(subst ','\'',$(perllibdir_relative)) gitwebdir_SQ = $(subst ','\'',$(gitwebdir)) gitwebstaticdir_SQ = $(subst ','\'',$(gitwebstaticdir)) SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) TEST_SHELL_PATH_SQ = $(subst ','\'',$(TEST_SHELL_PATH)) PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH)) PYTHON_PATH_SQ = $(subst ','\'',$(PYTHON_PATH)) TCLTK_PATH_SQ = $(subst ','\'',$(TCLTK_PATH)) DIFF_SQ = $(subst ','\'',$(DIFF)) PERLLIB_EXTRA_SQ = $(subst ','\'',$(PERLLIB_EXTRA)) # RUNTIME_PREFIX's resolution logic requires resource paths to be expressed # relative to each other and share an installation path. # # This is a dependency in: # - Git's binary RUNTIME_PREFIX logic in (see "exec_cmd.c"). # - The runtime prefix Perl header (see # "perl/header_templates/runtime_prefix.template.pl"). ifdef RUNTIME_PREFIX ifneq ($(filter /%,$(firstword $(gitexecdir_relative))),) $(error RUNTIME_PREFIX requires a relative gitexecdir, not: $(gitexecdir)) endif ifneq ($(filter /%,$(firstword $(localedir_relative))),) $(error RUNTIME_PREFIX requires a relative localedir, not: $(localedir)) endif ifndef NO_PERL ifneq ($(filter /%,$(firstword $(perllibdir_relative))),) $(error RUNTIME_PREFIX requires a relative perllibdir, not: $(perllibdir)) endif endif endif # We must filter out any object files from $(GITLIBS), # as it is typically used like: # # foo: foo.o $(GITLIBS) # $(CC) $(filter %.o,$^) $(LIBS) # # where we use it as a dependency. Since we also pull object files # from the dependency list, that would make each entry appear twice. 
LIBS = $(filter-out %.o, $(GITLIBS)) $(EXTLIBS) BASIC_CFLAGS += $(COMPAT_CFLAGS) LIB_OBJS += $(COMPAT_OBJS) # Quote for C ifdef DEFAULT_EDITOR DEFAULT_EDITOR_CQ = "$(subst ",\",$(subst \,\\,$(DEFAULT_EDITOR)))" DEFAULT_EDITOR_CQ_SQ = $(subst ','\'',$(DEFAULT_EDITOR_CQ)) BASIC_CFLAGS += -DDEFAULT_EDITOR='$(DEFAULT_EDITOR_CQ_SQ)' endif ifdef DEFAULT_PAGER DEFAULT_PAGER_CQ = "$(subst ",\",$(subst \,\\,$(DEFAULT_PAGER)))" DEFAULT_PAGER_CQ_SQ = $(subst ','\'',$(DEFAULT_PAGER_CQ)) BASIC_CFLAGS += -DDEFAULT_PAGER='$(DEFAULT_PAGER_CQ_SQ)' endif ifdef SHELL_PATH SHELL_PATH_CQ = "$(subst ",\",$(subst \,\\,$(SHELL_PATH)))" SHELL_PATH_CQ_SQ = $(subst ','\'',$(SHELL_PATH_CQ)) BASIC_CFLAGS += -DSHELL_PATH='$(SHELL_PATH_CQ_SQ)' endif GIT_USER_AGENT_SQ = $(subst ','\'',$(GIT_USER_AGENT)) GIT_USER_AGENT_CQ = "$(subst ",\",$(subst \,\\,$(GIT_USER_AGENT)))" GIT_USER_AGENT_CQ_SQ = $(subst ','\'',$(GIT_USER_AGENT_CQ)) GIT-USER-AGENT: FORCE @if test x'$(GIT_USER_AGENT_SQ)' != x"`cat GIT-USER-AGENT 2>/dev/null`"; then \ echo '$(GIT_USER_AGENT_SQ)' >GIT-USER-AGENT; \ fi ifdef DEFAULT_HELP_FORMAT BASIC_CFLAGS += -DDEFAULT_HELP_FORMAT='"$(DEFAULT_HELP_FORMAT)"' endif ALL_CFLAGS += $(BASIC_CFLAGS) ALL_LDFLAGS += $(BASIC_LDFLAGS) export DIFF TAR INSTALL DESTDIR SHELL_PATH ### Build rules SHELL = $(SHELL_PATH) all:: shell_compatibility_test ifeq "$(PROFILE)" "BUILD" all:: profile endif profile:: profile-clean $(MAKE) PROFILE=GEN all $(MAKE) PROFILE=GEN -j1 test @if test -n "$$GIT_PERF_REPO" || test -d .git; then \ $(MAKE) PROFILE=GEN -j1 perf; \ else \ echo "Skipping profile of perf tests..."; \ fi $(MAKE) PROFILE=USE all profile-fast: profile-clean $(MAKE) PROFILE=GEN all $(MAKE) PROFILE=GEN -j1 perf $(MAKE) PROFILE=USE all all:: $(ALL_COMMANDS_TO_INSTALL) $(SCRIPT_LIB) $(OTHER_PROGRAMS) GIT-BUILD-OPTIONS ifneq (,$X) $(QUIET_BUILT_IN)$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_COMMANDS_TO_INSTALL) $(OTHER_PROGRAMS))), if test ! -d '$p' && test ! 
'$p' -ef '$p$X'; then $(RM) '$p'; fi;) endif all:: ifndef NO_TCLTK $(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all $(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all endif $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)' # If you add a new fuzzer, please also make sure to run it in # ci/run-build-and-minimal-fuzzers.sh so that we make sure it still links and # runs in the future. FUZZ_OBJS += oss-fuzz/dummy-cmd-main.o FUZZ_OBJS += oss-fuzz/fuzz-commit-graph.o FUZZ_OBJS += oss-fuzz/fuzz-config.o FUZZ_OBJS += oss-fuzz/fuzz-credential-from-url-gently.o FUZZ_OBJS += oss-fuzz/fuzz-date.o FUZZ_OBJS += oss-fuzz/fuzz-pack-headers.o FUZZ_OBJS += oss-fuzz/fuzz-pack-idx.o FUZZ_OBJS += oss-fuzz/fuzz-parse-attr-line.o FUZZ_OBJS += oss-fuzz/fuzz-url-decode-mem.o .PHONY: fuzz-objs fuzz-objs: $(FUZZ_OBJS) # Always build fuzz objects even if not testing, to prevent bit-rot. all:: $(FUZZ_OBJS) FUZZ_PROGRAMS += $(patsubst %.o,%,$(filter-out %dummy-cmd-main.o,$(FUZZ_OBJS))) # Build fuzz programs when possible, even without the necessary fuzzing support, # to prevent bit-rot. ifdef LINK_FUZZ_PROGRAMS all:: $(FUZZ_PROGRAMS) endif please_set_SHELL_PATH_to_a_more_modern_shell: @$$(:) shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell strip: $(PROGRAMS) git$X $(STRIP) $(STRIP_OPTS) $^ ### Target-specific flags and dependencies # The generic compilation pattern rule and automatically # computed header dependencies (falling back to a dependency on # LIB_H) are enough to describe how most targets should be built, # but some targets are special enough to need something a little # different. # # - When a source file "foo.c" #includes a generated header file, # we need to list that dependency for the "foo.o" target. 
# # We also list it from other targets that are built from foo.c # like "foo.sp" and "foo.s", even though that is easy to forget # to do because the generated header is already present around # after a regular build attempt. # # - Some code depends on configuration kept in makefile # variables. The target-specific variable EXTRA_CPPFLAGS can # be used to convey that information to the C preprocessor # using -D options. # # The "foo.o" target should have a corresponding dependency on # a file that changes when the value of the makefile variable # changes. For example, targets making use of the # $(GIT_VERSION) variable depend on GIT-VERSION-FILE. # # Technically the ".sp" and ".s" targets do not need this # dependency because they are force-built, but they get the # same dependency for consistency. This way, you do not have to # know how each target is implemented. And it means the # dependencies here will not need to change if the force-build # details change some day. git.sp git.s git.o: GIT-PREFIX git.sp git.s git.o: EXTRA_CPPFLAGS = \ '-DGIT_HTML_PATH="$(htmldir_relative_SQ)"' \ '-DGIT_MAN_PATH="$(mandir_relative_SQ)"' \ '-DGIT_INFO_PATH="$(infodir_relative_SQ)"' git$X: git.o GIT-LDFLAGS $(BUILTIN_OBJS) $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \ $(filter %.o,$^) $(LIBS) help.sp help.s help.o: command-list.h builtin/bugreport.sp builtin/bugreport.s builtin/bugreport.o: hook-list.h builtin/help.sp builtin/help.s builtin/help.o: config-list.h GIT-PREFIX builtin/help.sp builtin/help.s builtin/help.o: EXTRA_CPPFLAGS = \ '-DGIT_HTML_PATH="$(htmldir_relative_SQ)"' \ '-DGIT_MAN_PATH="$(mandir_relative_SQ)"' \ '-DGIT_INFO_PATH="$(infodir_relative_SQ)"' PAGER_ENV_SQ = $(subst ','\'',$(PAGER_ENV)) PAGER_ENV_CQ = "$(subst ",\",$(subst \,\\,$(PAGER_ENV)))" PAGER_ENV_CQ_SQ = $(subst ','\'',$(PAGER_ENV_CQ)) pager.sp pager.s pager.o: EXTRA_CPPFLAGS = \ -DPAGER_ENV='$(PAGER_ENV_CQ_SQ)' version-def.h: version-def.h.in GIT-VERSION-GEN GIT-VERSION-FILE 
GIT-USER-AGENT $(QUIET_GEN)$(call version_gen,"$(shell pwd)",$<,$@) version.sp version.s version.o: version-def.h $(BUILT_INS): git$X $(QUIET_BUILT_IN)$(RM) $@ && \ ln $< $@ 2>/dev/null || \ ln -s $< $@ 2>/dev/null || \ cp $< $@ config-list.h: generate-configlist.sh config-list.h: Documentation/*config.txt Documentation/config/*.txt $(QUIET_GEN)$(SHELL_PATH) ./generate-configlist.sh . $@ command-list.h: generate-cmdlist.sh command-list.txt command-list.h: $(wildcard Documentation/git*.txt) $(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh \ $(patsubst %,--exclude-program %,$(EXCLUDED_PROGRAMS)) \ . $@ hook-list.h: generate-hooklist.sh Documentation/githooks.txt $(QUIET_GEN)$(SHELL_PATH) ./generate-hooklist.sh . $@ SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):\ $(localedir_SQ):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\ $(gitwebdir_SQ):$(PERL_PATH_SQ):$(PAGER_ENV):\ $(perllibdir_SQ) GIT-SCRIPT-DEFINES: FORCE @FLAGS='$(SCRIPT_DEFINES)'; \ if test x"$$FLAGS" != x"`cat $@ 2>/dev/null`" ; then \ echo >&2 " * new script parameters"; \ echo "$$FLAGS" >$@; \ fi $(SCRIPT_SH_GEN) $(SCRIPT_LIB) : % : %.sh generate-script.sh GIT-BUILD-OPTIONS GIT-SCRIPT-DEFINES $(QUIET_GEN)./generate-script.sh "$<" "$@+" ./GIT-BUILD-OPTIONS && \ mv $@+ $@ git.rc: git.rc.in GIT-VERSION-GEN GIT-VERSION-FILE $(QUIET_GEN)$(call version_gen,"$(shell pwd)",$<,$@) git.res: git.rc GIT-PREFIX $(QUIET_RC)$(RC) -i $< -o $@ # This makes sure we depend on the NO_PERL setting itself. $(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS # Used for substitution in Perl modules. Disabled when using RUNTIME_PREFIX # since the locale directory is injected. perl_localedir_SQ = $(localedir_SQ) ifndef NO_PERL PERL_HEADER_TEMPLATE = perl/header_templates/fixed_prefix.template.pl PERL_DEFINES = PERL_DEFINES += $(PERL_PATH_SQ) PERL_DEFINES += $(PERLLIB_EXTRA_SQ) PERL_DEFINES += $(perllibdir_SQ) PERL_DEFINES += $(RUNTIME_PREFIX) PERL_DEFINES += $(NO_PERL_CPAN_FALLBACKS) PERL_DEFINES += $(NO_GETTEXT) # Support Perl runtime prefix. 
In this mode, a different header is installed # into Perl scripts. ifdef RUNTIME_PREFIX PERL_HEADER_TEMPLATE = perl/header_templates/runtime_prefix.template.pl # Don't export a fixed $(localedir) path; it will be resolved by the Perl header # at runtime. perl_localedir_SQ = endif PERL_DEFINES += $(gitexecdir) $(perllibdir) $(localedir) $(SCRIPT_PERL_GEN): % : %.perl generate-perl.sh GIT-PERL-DEFINES GIT-PERL-HEADER GIT-VERSION-FILE $(QUIET_GEN)$(SHELL_PATH) generate-perl.sh ./GIT-BUILD-OPTIONS ./GIT-VERSION-FILE GIT-PERL-HEADER "$<" "$@+" && \ mv $@+ $@ PERL_DEFINES := $(subst $(space),:,$(PERL_DEFINES)) GIT-PERL-DEFINES: FORCE @FLAGS='$(PERL_DEFINES)'; \ if test x"$$FLAGS" != x"`cat $@ 2>/dev/null`" ; then \ echo >&2 " * new perl-specific parameters"; \ echo "$$FLAGS" >$@; \ fi GIT-PERL-HEADER: $(PERL_HEADER_TEMPLATE) GIT-PERL-DEFINES Makefile $(QUIET_GEN) \ INSTLIBDIR='$(perllibdir_SQ)' && \ INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \ INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \ sed -e 's=@PATHSEP@=$(pathsep)=g' \ -e "s=@INSTLIBDIR@=$$INSTLIBDIR=g" \ -e 's=@PERLLIBDIR_REL@=$(perllibdir_relative_SQ)=g' \ -e 's=@GITEXECDIR_REL@=$(gitexecdir_relative_SQ)=g' \ -e 's=@LOCALEDIR_REL@=$(localedir_relative_SQ)=g' \ $< >$@+ && \ mv $@+ $@ .PHONY: perllibdir perllibdir: @echo '$(perllibdir_SQ)' git-instaweb: git-instaweb.sh generate-script.sh GIT-BUILD-OPTIONS GIT-SCRIPT-DEFINES $(QUIET_GEN)./generate-script.sh "$<" "$@+" ./GIT-BUILD-OPTIONS && \ chmod +x $@+ && \ mv $@+ $@ else # NO_PERL $(SCRIPT_PERL_GEN) git-instaweb: % : unimplemented.sh $(QUIET_GEN) \ sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ -e 's|@REASON@|NO_PERL=$(NO_PERL)|g' \ unimplemented.sh >$@+ && \ chmod +x $@+ && \ mv $@+ $@ endif # NO_PERL # This makes sure we depend on the NO_PYTHON setting itself. 
$(SCRIPT_PYTHON_GEN): GIT-BUILD-OPTIONS ifndef NO_PYTHON $(SCRIPT_PYTHON_GEN): generate-python.sh $(SCRIPT_PYTHON_GEN): % : %.py $(QUIET_GEN)$(SHELL_PATH) generate-python.sh ./GIT-BUILD-OPTIONS "$<" "$@" else # NO_PYTHON $(SCRIPT_PYTHON_GEN): % : unimplemented.sh $(QUIET_GEN) \ sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ -e 's|@REASON@|NO_PYTHON=$(NO_PYTHON)|g' \ unimplemented.sh >$@+ && \ chmod +x $@+ && \ mv $@+ $@ endif # NO_PYTHON CONFIGURE_RECIPE = sed -e 's/@GIT_VERSION@/$(GIT_VERSION)/g' \ configure.ac >configure.ac+ && \ autoconf -o configure configure.ac+ && \ $(RM) configure.ac+ configure: configure.ac GIT-VERSION-FILE $(QUIET_GEN)$(CONFIGURE_RECIPE) ifdef AUTOCONFIGURED # We avoid depending on 'configure' here, because it gets rebuilt # every time GIT-VERSION-FILE is modified, only to update the embedded # version number string, which config.status does not care about. We # do want to recheck when the platform/environment detection logic # changes, hence this depends on configure.ac. config.status: configure.ac $(QUIET_GEN)$(CONFIGURE_RECIPE) && \ if test -f config.status; then \ ./config.status --recheck; \ else \ ./configure; \ fi reconfigure config.mak.autogen: config.status $(QUIET_GEN)./config.status .PHONY: reconfigure # This is a convenience target. 
endif XDIFF_OBJS += xdiff/xdiffi.o XDIFF_OBJS += xdiff/xemit.o XDIFF_OBJS += xdiff/xhistogram.o XDIFF_OBJS += xdiff/xmerge.o XDIFF_OBJS += xdiff/xpatience.o XDIFF_OBJS += xdiff/xprepare.o XDIFF_OBJS += xdiff/xutils.o .PHONY: xdiff-objs xdiff-objs: $(XDIFF_OBJS) REFTABLE_OBJS += reftable/basics.o REFTABLE_OBJS += reftable/error.o REFTABLE_OBJS += reftable/block.o REFTABLE_OBJS += reftable/blocksource.o REFTABLE_OBJS += reftable/iter.o REFTABLE_OBJS += reftable/merged.o REFTABLE_OBJS += reftable/pq.o REFTABLE_OBJS += reftable/reader.o REFTABLE_OBJS += reftable/record.o REFTABLE_OBJS += reftable/stack.o REFTABLE_OBJS += reftable/system.o REFTABLE_OBJS += reftable/tree.o REFTABLE_OBJS += reftable/writer.o TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS)) .PHONY: test-objs test-objs: $(TEST_OBJS) GIT_OBJS += $(LIB_OBJS) GIT_OBJS += $(BUILTIN_OBJS) GIT_OBJS += common-main.o GIT_OBJS += git.o .PHONY: git-objs git-objs: $(GIT_OBJS) SCALAR_OBJS += scalar.o .PHONY: scalar-objs scalar-objs: $(SCALAR_OBJS) OBJECTS += $(GIT_OBJS) OBJECTS += $(SCALAR_OBJS) OBJECTS += $(PROGRAM_OBJS) OBJECTS += $(TEST_OBJS) OBJECTS += $(XDIFF_OBJS) OBJECTS += $(FUZZ_OBJS) OBJECTS += $(REFTABLE_OBJS) $(REFTABLE_TEST_OBJS) OBJECTS += $(UNIT_TEST_OBJS) OBJECTS += $(CLAR_TEST_OBJS) OBJECTS += $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(UNIT_TEST_PROGRAMS)) ifndef NO_CURL OBJECTS += http.o http-walker.o remote-curl.o endif .PHONY: objects objects: $(OBJECTS) dep_files := $(foreach f,$(OBJECTS),$(dir $f).depend/$(notdir $f).d) dep_dirs := $(addsuffix .depend,$(sort $(dir $(OBJECTS)))) ifeq ($(COMPUTE_HEADER_DEPENDENCIES),yes) $(dep_dirs): @mkdir -p $@ missing_dep_dirs := $(filter-out $(wildcard $(dep_dirs)),$(dep_dirs)) dep_file = $(dir $@).depend/$(notdir $@).d dep_args = -MF $(dep_file) -MQ $@ -MMD -MP endif ifneq ($(COMPUTE_HEADER_DEPENDENCIES),yes) missing_dep_dirs = dep_args = endif compdb_dir = compile_commands ifeq ($(GENERATE_COMPILATION_DATABASE),yes) 
missing_compdb_dir = $(compdb_dir) $(missing_compdb_dir): @mkdir -p $@ compdb_file = $(compdb_dir)/$(subst /,-,$@.json) compdb_args = -MJ $(compdb_file) else missing_compdb_dir = compdb_args = endif $(OBJECTS): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir) $(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $< %.s: %.c GIT-CFLAGS FORCE $(QUIET_CC)$(CC) -o $@ -S $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $< ifdef USE_COMPUTED_HEADER_DEPENDENCIES # Take advantage of gcc's on-the-fly dependency generation # See . dep_files_present := $(wildcard $(dep_files)) ifneq ($(dep_files_present),) include $(dep_files_present) endif else $(OBJECTS): $(LIB_H) $(GENERATED_H) endif ifeq ($(GENERATE_COMPILATION_DATABASE),yes) all:: compile_commands.json compile_commands.json: $(QUIET_GEN)sed -e '1s/^/[/' -e '$$s/,$$/]/' $(compdb_dir)/*.o.json > $@+ @if test -s $@+; then mv $@+ $@; else $(RM) $@+; fi endif exec-cmd.sp exec-cmd.s exec-cmd.o: GIT-PREFIX exec-cmd.sp exec-cmd.s exec-cmd.o: EXTRA_CPPFLAGS = \ '-DGIT_EXEC_PATH="$(gitexecdir_SQ)"' \ '-DGIT_LOCALE_PATH="$(localedir_relative_SQ)"' \ '-DBINDIR="$(bindir_relative_SQ)"' \ '-DFALLBACK_RUNTIME_PREFIX="$(prefix_SQ)"' setup.sp setup.s setup.o: GIT-PREFIX setup.sp setup.s setup.o: EXTRA_CPPFLAGS = \ -DDEFAULT_GIT_TEMPLATE_DIR='"$(template_dir_SQ)"' config.sp config.s config.o: GIT-PREFIX config.sp config.s config.o: EXTRA_CPPFLAGS = \ -DETC_GITCONFIG='"$(ETC_GITCONFIG_SQ)"' attr.sp attr.s attr.o: GIT-PREFIX attr.sp attr.s attr.o: EXTRA_CPPFLAGS = \ -DETC_GITATTRIBUTES='"$(ETC_GITATTRIBUTES_SQ)"' gettext.sp gettext.s gettext.o: GIT-PREFIX gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \ -DGIT_LOCALE_PATH='"$(localedir_relative_SQ)"' http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SP_EXTRA_FLAGS += \ -DCURL_DISABLE_TYPECHECK pack-revindex.sp: SP_EXTRA_FLAGS += -Wno-memcpy-max-count ifdef NO_EXPAT http-walker.sp http-walker.s http-walker.o: EXTRA_CPPFLAGS = -DNO_EXPAT endif 
ifdef NO_REGEX compat/regex/regex.sp compat/regex/regex.o: EXTRA_CPPFLAGS = \ -DGAWK -DNO_MBSUPPORT endif ifdef USE_NED_ALLOCATOR compat/nedmalloc/nedmalloc.sp compat/nedmalloc/nedmalloc.o: EXTRA_CPPFLAGS = \ -DNDEBUG -DREPLACE_SYSTEM_ALLOCATOR compat/nedmalloc/nedmalloc.sp: SP_EXTRA_FLAGS += -Wno-non-pointer-null endif headless-git.o: compat/win32/headless.c GIT-CFLAGS $(QUIET_CC)$(CC) $(ALL_CFLAGS) $(COMPAT_CFLAGS) \ -fno-stack-protector -o $@ -c -Wall -Wwrite-strings $< headless-git$X: headless-git.o git.res GIT-LDFLAGS $(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) -mwindows -o $@ $< git.res git-%$X: %.o GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) git-imap-send$X: imap-send.o $(IMAP_SEND_BUILDDEPS) GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(IMAP_SEND_LDFLAGS) $(LIBS) git-http-fetch$X: http.o http-walker.o http-fetch.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(CURL_LIBCURL) $(LIBS) git-http-push$X: http.o http-push.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS) $(REMOTE_CURL_ALIASES): $(REMOTE_CURL_PRIMARY) $(QUIET_LNCP)$(RM) $@ && \ ln $< $@ 2>/dev/null || \ ln -s $< $@ 2>/dev/null || \ cp $< $@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o $(LAZYLOAD_LIBCURL_OBJ) GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS) scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \ $(filter %.o,$^) $(LIBS) $(LIB_FILE): $(LIB_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^ $(XDIFF_LIB): $(XDIFF_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^ $(REFTABLE_LIB): $(REFTABLE_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^ 
export DEFAULT_EDITOR DEFAULT_PAGER Documentation/GIT-EXCLUDED-PROGRAMS: FORCE @EXCLUDED='EXCLUDED_PROGRAMS := $(EXCLUDED_PROGRAMS)'; \ if test x"$$EXCLUDED" != \ x"`cat Documentation/GIT-EXCLUDED-PROGRAMS 2>/dev/null`" ; then \ echo >&2 " * new documentation flags"; \ echo "$$EXCLUDED" >Documentation/GIT-EXCLUDED-PROGRAMS; \ fi .PHONY: doc man man-perl html info pdf doc: man-perl $(MAKE) -C Documentation all man: man-perl $(MAKE) -C Documentation man man-perl: perl/build/man/man3/Git.3pm html: $(MAKE) -C Documentation html info: $(MAKE) -C Documentation info pdf: $(MAKE) -C Documentation pdf XGETTEXT_FLAGS = \ --force-po \ --add-comments=TRANSLATORS: \ --msgid-bugs-address="Git Mailing List " \ --package-name=Git XGETTEXT_FLAGS_C = $(XGETTEXT_FLAGS) --language=C \ --keyword=_ --keyword=N_ --keyword="Q_:1,2" XGETTEXT_FLAGS_SH = $(XGETTEXT_FLAGS) --language=Shell \ --keyword=gettextln --keyword=eval_gettextln XGETTEXT_FLAGS_PERL = $(XGETTEXT_FLAGS) --language=Perl \ --keyword=__ --keyword=N__ --keyword="__n:1,2" MSGMERGE_FLAGS = --add-location --backup=off --update LOCALIZED_C = $(sort $(FOUND_C_SOURCES) $(FOUND_H_SOURCES) $(GENERATED_H)) LOCALIZED_SH = $(sort $(SCRIPT_SH) git-sh-setup.sh) LOCALIZED_PERL = $(sort $(SCRIPT_PERL)) ifdef XGETTEXT_INCLUDE_TESTS LOCALIZED_C += t/t0200/test.c LOCALIZED_SH += t/t0200/test.sh LOCALIZED_PERL += t/t0200/test.perl endif ## We generate intermediate .build/pot/po/%.po files containing a ## extract of the translations we find in each file in the source ## tree. We will assemble them using msgcat to create the final ## "po/git.pot" file. 
LOCALIZED_ALL_GEN_PO = LOCALIZED_C_GEN_PO = $(LOCALIZED_C:%=.build/pot/po/%.po) LOCALIZED_ALL_GEN_PO += $(LOCALIZED_C_GEN_PO) LOCALIZED_SH_GEN_PO = $(LOCALIZED_SH:%=.build/pot/po/%.po) LOCALIZED_ALL_GEN_PO += $(LOCALIZED_SH_GEN_PO) LOCALIZED_PERL_GEN_PO = $(LOCALIZED_PERL:%=.build/pot/po/%.po) LOCALIZED_ALL_GEN_PO += $(LOCALIZED_PERL_GEN_PO) ## Gettext tools cannot work with our own custom PRItime type, so ## we replace PRItime with PRIuMAX. We need to update this to ## PRIdMAX if we switch to a signed type later. $(LOCALIZED_C_GEN_PO): .build/pot/po/%.po: % $(call mkdir_p_parent_template) $(QUIET_XGETTEXT) \ if grep -q PRItime $<; then \ (\ sed -e 's|PRItime|PRIuMAX|g' <$< \ >.build/pot/po/$< && \ cd .build/pot/po && \ $(XGETTEXT) --omit-header \ -o $(@:.build/pot/po/%=%) \ $(XGETTEXT_FLAGS_C) $< && \ rm $<; \ ); \ else \ $(XGETTEXT) --omit-header \ -o $@ $(XGETTEXT_FLAGS_C) $<; \ fi $(LOCALIZED_SH_GEN_PO): .build/pot/po/%.po: % $(call mkdir_p_parent_template) $(QUIET_XGETTEXT)$(XGETTEXT) --omit-header \ -o$@ $(XGETTEXT_FLAGS_SH) $< $(LOCALIZED_PERL_GEN_PO): .build/pot/po/%.po: % $(call mkdir_p_parent_template) $(QUIET_XGETTEXT)$(XGETTEXT) --omit-header \ -o$@ $(XGETTEXT_FLAGS_PERL) $< define gen_pot_header $(XGETTEXT) $(XGETTEXT_FLAGS_C) \ -o - /dev/null | \ sed -e 's|charset=CHARSET|charset=UTF-8|' \ -e 's|\(Last-Translator: \)FULL NAME <.*>|\1make by the Makefile|' \ -e 's|\(Language-Team: \)LANGUAGE <.*>|\1Git Mailing List |' \ >$@ && \ echo '"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"' >>$@ endef .build/pot/git.header: $(LOCALIZED_ALL_GEN_PO) $(call mkdir_p_parent_template) $(QUIET_GEN)$(gen_pot_header) po/git.pot: .build/pot/git.header $(LOCALIZED_ALL_GEN_PO) $(QUIET_GEN)$(MSGCAT) $^ >$@ .PHONY: pot pot: po/git.pot define check_po_file_envvar $(if $(PO_FILE), \ $(if $(filter po/%.po,$(PO_FILE)), , \ $(error PO_FILE should match pattern: "po/%.po")), \ $(error PO_FILE is not defined)) endef .PHONY: po-update po-update: po/git.pot 
$(check_po_file_envvar) @if test ! -e $(PO_FILE); then \ echo >&2 "error: $(PO_FILE) does not exist"; \ echo >&2 'To create an initial po file, use: "make po-init PO_FILE=po/XX.po"'; \ exit 1; \ fi $(QUIET_MSGMERGE)$(MSGMERGE) $(MSGMERGE_FLAGS) $(PO_FILE) po/git.pot .PHONY: check-pot check-pot: $(LOCALIZED_ALL_GEN_PO) ### TODO FIXME: Translating everything in these files is a bad ### heuristic for "core", as we'll translate obscure error() messages ### along with commonly seen i18n messages. A better heuristic would ### be to e.g. use spatch to first remove error/die/warning ### etc. messages. LOCALIZED_C_CORE = LOCALIZED_C_CORE += builtin/checkout.c LOCALIZED_C_CORE += builtin/clone.c LOCALIZED_C_CORE += builtin/index-pack.c LOCALIZED_C_CORE += builtin/push.c LOCALIZED_C_CORE += builtin/reset.c LOCALIZED_C_CORE += remote.c LOCALIZED_C_CORE += wt-status.c LOCALIZED_C_CORE_GEN_PO = $(LOCALIZED_C_CORE:%=.build/pot/po/%.po) .build/pot/git-core.header: $(LOCALIZED_C_CORE_GEN_PO) $(call mkdir_p_parent_template) $(QUIET_GEN)$(gen_pot_header) po/git-core.pot: .build/pot/git-core.header $(LOCALIZED_C_CORE_GEN_PO) $(QUIET_GEN)$(MSGCAT) $^ >$@ .PHONY: po-init po-init: po/git-core.pot $(check_po_file_envvar) @if test -e $(PO_FILE); then \ echo >&2 "error: $(PO_FILE) exists already"; \ exit 1; \ fi $(QUIET_MSGINIT)msginit \ --input=$< \ --output=$(PO_FILE) \ --no-translator \ --locale=$(PO_FILE:po/%.po=%) ## po/*.po files & their rules ifdef NO_GETTEXT POFILES := MOFILES := else POFILES := $(wildcard po/*.po) MOFILES := $(patsubst po/%.po,po/build/locale/%/LC_MESSAGES/git.mo,$(POFILES)) all:: $(MOFILES) endif po/build/locale/%/LC_MESSAGES/git.mo: po/%.po $(call mkdir_p_parent_template) $(QUIET_MSGFMT)$(MSGFMT) -o $@ $< LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm) LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL)) LIB_CPAN := $(wildcard perl/FromCPAN/*.pm perl/FromCPAN/*/*.pm) LIB_CPAN_GEN := $(patsubst 
perl/%.pm,perl/build/lib/%.pm,$(LIB_CPAN)) ifndef NO_PERL all:: $(LIB_PERL_GEN) ifndef NO_PERL_CPAN_FALLBACKS all:: $(LIB_CPAN_GEN) endif NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS)) endif perl/build/lib/%.pm: perl/%.pm generate-perl.sh GIT-BUILD-OPTIONS GIT-VERSION-FILE GIT-PERL-DEFINES $(call mkdir_p_parent_template) $(QUIET_GEN)$(SHELL_PATH) generate-perl.sh ./GIT-BUILD-OPTIONS ./GIT-VERSION-FILE GIT-PERL-HEADER "$<" "$@" perl/build/man/man3/Git.3pm: perl/Git.pm $(call mkdir_p_parent_template) $(QUIET_GEN)pod2man $< $@ $(ETAGS_TARGET): $(FOUND_SOURCE_FILES) $(QUIET_GEN)$(RM) $@+ && \ echo $(FOUND_SOURCE_FILES) | xargs etags -a -o $@+ && \ mv $@+ $@ tags: $(FOUND_SOURCE_FILES) $(QUIET_GEN)$(RM) $@+ && \ echo $(FOUND_SOURCE_FILES) | xargs ctags -a -o $@+ && \ mv $@+ $@ cscope.out: $(FOUND_SOURCE_FILES) $(QUIET_GEN)$(RM) $@+ && \ echo $(FOUND_SOURCE_FILES) | xargs cscope -f$@+ -b && \ mv $@+ $@ .PHONY: cscope cscope: cscope.out ### Detect prefix changes TRACK_PREFIX = $(bindir_SQ):$(gitexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):\ $(localedir_SQ) GIT-PREFIX: FORCE @FLAGS='$(TRACK_PREFIX)'; \ if test x"$$FLAGS" != x"`cat GIT-PREFIX 2>/dev/null`" ; then \ echo >&2 " * new prefix flags"; \ echo "$$FLAGS" >GIT-PREFIX; \ fi TRACK_CFLAGS = $(CC):$(subst ','\'',$(ALL_CFLAGS)):$(USE_GETTEXT_SCHEME) GIT-CFLAGS: FORCE @FLAGS='$(TRACK_CFLAGS)'; \ if test x"$$FLAGS" != x"`cat GIT-CFLAGS 2>/dev/null`" ; then \ echo >&2 " * new build flags"; \ echo "$$FLAGS" >GIT-CFLAGS; \ fi TRACK_LDFLAGS = $(subst ','\'',$(ALL_LDFLAGS)) GIT-LDFLAGS: FORCE @FLAGS='$(TRACK_LDFLAGS)'; \ if test x"$$FLAGS" != x"`cat GIT-LDFLAGS 2>/dev/null`" ; then \ echo >&2 " * new link flags"; \ echo "$$FLAGS" >GIT-LDFLAGS; \ fi ifdef RUNTIME_PREFIX RUNTIME_PREFIX_OPTION = true else RUNTIME_PREFIX_OPTION = false endif # We need to apply sq twice, once to protect from the shell # that runs GIT-BUILD-OPTIONS, and then again to protect it # and the first level quoting from the shell that 
runs "echo". GIT-BUILD-OPTIONS: FORCE @sed \ -e "s!@BROKEN_PATH_FIX@!\'$(BROKEN_PATH_FIX)\'!" \ -e "s|@DIFF@|\'$(DIFF)\'|" \ -e "s|@FSMONITOR_DAEMON_BACKEND@|\'$(FSMONITOR_DAEMON_BACKEND)\'|" \ -e "s|@FSMONITOR_OS_SETTINGS@|\'$(FSMONITOR_OS_SETTINGS)\'|" \ -e "s|@GITWEBDIR@|\'$(gitwebdir_SQ)\'|" \ -e "s|@GIT_INTEROP_MAKE_OPTS@|\'$(GIT_INTEROP_MAKE_OPTS)\'|" \ -e "s|@GIT_PERF_LARGE_REPO@|\'$(GIT_PERF_LARGE_REPO)\'|" \ -e "s|@GIT_PERF_MAKE_COMMAND@|\'$(GIT_PERF_MAKE_COMMAND)\'|" \ -e "s|@GIT_PERF_MAKE_OPTS@|\'$(GIT_PERF_MAKE_OPTS)\'|" \ -e "s|@GIT_PERF_REPEAT_COUNT@|\'$(GIT_PERF_REPEAT_COUNT)\'|" \ -e "s|@GIT_PERF_REPO@|\'$(GIT_PERF_REPO)\'|" \ -e "s|@GIT_TEST_CMP@|\'$(GIT_TEST_CMP)\'|" \ -e "s|@GIT_TEST_CMP_USE_COPIED_CONTEXT@|\'$(GIT_TEST_CMP_USE_COPIED_CONTEXT)\'|" \ -e "s|@GIT_TEST_GITPERLLIB@|\'$(shell pwd)/perl/build/lib\'|" \ -e "s|@GIT_TEST_INDEX_VERSION@|\'$(GIT_TEST_INDEX_VERSION)\'|" \ -e "s|@GIT_TEST_MERGE_TOOLS_DIR@|\'$(shell pwd)/mergetools\'|" \ -e "s|@GIT_TEST_OPTS@|\'$(GIT_TEST_OPTS)\'|" \ -e "s|@GIT_TEST_PERL_FATAL_WARNINGS@|\'$(GIT_TEST_PERL_FATAL_WARNINGS)\'|" \ -e "s|@GIT_TEST_POPATH@|\'$(shell pwd)/po\'|" \ -e "s|@GIT_TEST_TEMPLATE_DIR@|\'$(shell pwd)/templates/blt\'|" \ -e "s|@GIT_TEST_TEXTDOMAINDIR@|\'$(shell pwd)/po/build/locale\'|" \ -e "s|@GIT_TEST_UTF8_LOCALE@|\'$(GIT_TEST_UTF8_LOCALE)\'|" \ -e "s|@LOCALEDIR@|\'$(localedir_SQ)\'|" \ -e "s|@NO_CURL@|\'$(NO_CURL)\'|" \ -e "s|@NO_EXPAT@|\'$(NO_EXPAT)\'|" \ -e "s|@NO_GETTEXT@|\'$(NO_GETTEXT)\'|" \ -e "s|@NO_GITWEB@|\'$(NO_GITWEB)\'|" \ -e "s|@NO_ICONV@|\'$(NO_ICONV)\'|" \ -e "s|@NO_PERL@|\'$(NO_PERL)\'|" \ -e "s|@NO_PERL_CPAN_FALLBACKS@|\'$(NO_PERL_CPAN_FALLBACKS_SQ)\'|" \ -e "s|@NO_PTHREADS@|\'$(NO_PTHREADS)\'|" \ -e "s|@NO_PYTHON@|\'$(NO_PYTHON)\'|" \ -e "s|@NO_REGEX@|\'$(NO_REGEX)\'|" \ -e "s|@NO_UNIX_SOCKETS@|\'$(NO_UNIX_SOCKETS)\'|" \ -e "s|@PAGER_ENV@|\'$(PAGER_ENV)\'|" \ -e "s|@PERL_LOCALEDIR@|\'$(perl_localedir_SQ)\'|" \ -e "s|@PERL_PATH@|\'$(PERL_PATH_SQ)\'|" \ -e 
"s|@PYTHON_PATH@|\'$(PYTHON_PATH_SQ)\'|" \ -e "s|@RUNTIME_PREFIX@|\'$(RUNTIME_PREFIX_OPTION)\'|" \ -e "s|@SANITIZE_ADDRESS@|\'$(SANITIZE_ADDRESS)\'|" \ -e "s|@SANITIZE_LEAK@|\'$(SANITIZE_LEAK)\'|" \ -e "s|@SHELL_PATH@|\'$(SHELL_PATH_SQ)\'|" \ -e "s|@TAR@|\'$(TAR)\'|" \ -e "s|@TEST_OUTPUT_DIRECTORY@|\'$(TEST_OUTPUT_DIRECTORY)\'|" \ -e "s|@TEST_SHELL_PATH@|\'$(TEST_SHELL_PATH_SQ)\'|" \ -e "s|@USE_GETTEXT_SCHEME@|\'$(USE_GETTEXT_SCHEME)\'|" \ -e "s|@USE_LIBPCRE2@|\'$(USE_LIBPCRE2)\'|" \ -e "s|@X@|\'$(X)\'|" \ GIT-BUILD-OPTIONS.in >$@+ @if grep -q '^[A-Z][A-Z_]*=@.*@$$' $@+; then echo "Unsubstituted build options in $@" >&2 && exit 1; fi @if cmp $@+ $@ >/dev/null 2>&1; then $(RM) $@+; else mv $@+ $@; fi @if test -f GIT-BUILD-DIR; then rm GIT-BUILD-DIR; fi ### Detect Python interpreter path changes ifndef NO_PYTHON TRACK_PYTHON = $(subst ','\'',-DPYTHON_PATH='$(PYTHON_PATH_SQ)') GIT-PYTHON-VARS: FORCE @VARS='$(TRACK_PYTHON)'; \ if test x"$$VARS" != x"`cat $@ 2>/dev/null`" ; then \ echo >&2 " * new Python interpreter location"; \ echo "$$VARS" >$@; \ fi endif test_bindir_programs := $(patsubst %,bin-wrappers/%,$(BINDIR_PROGRAMS_NEED_X) $(BINDIR_PROGRAMS_NO_X) $(TEST_PROGRAMS_NEED_X)) all:: $(TEST_PROGRAMS) $(test_bindir_programs) $(UNIT_TEST_PROGS) $(CLAR_TEST_PROG) $(test_bindir_programs): bin-wrappers/%: bin-wrappers/wrap-for-bin.sh $(QUIET_GEN)sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ -e 's|@BUILD_DIR@|$(shell pwd)|' \ -e 's|@GIT_TEXTDOMAINDIR@|$(shell pwd)/po/build/locale|' \ -e 's|@GITPERLLIB@|$(shell pwd)/perl/build/lib|' \ -e 's|@MERGE_TOOLS_DIR@|$(shell pwd)/mergetools|' \ -e 's|@TEMPLATE_DIR@|$(shell pwd)/templates/blt|' \ -e 's|@PROG@|$(shell pwd)/$(patsubst test-%,t/helper/test-%,$(@F))$(if $(filter-out $(BINDIR_PROGRAMS_NO_X),$(@F)),$(X),)|' < $< > $@ && \ chmod +x $@ # GNU make supports exporting all variables by "export" without parameters. # However, the environment gets quite big, and some programs have problems # with that. 
export NO_SVN_TESTS export TEST_NO_MALLOC_CHECK ### Testing rules test: all $(MAKE) -C t/ all perf: all $(MAKE) -C t/perf/ all .PHONY: test perf .PRECIOUS: $(TEST_OBJS) t/helper/test-tool$X: $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS)) $(UNIT_TEST_DIR)/test-lib.o t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(filter %.a,$^) $(LIBS) check-sha1:: t/helper/test-tool$X t/helper/test-sha1.sh SP_SRC = $(filter-out $(THIRD_PARTY_SOURCES),$(patsubst %.o,%.c,$(OBJECTS))) SP_OBJ = $(patsubst %.c,%.sp,$(SP_SRC)) $(SP_OBJ): %.sp: %.c %.o $(GENERATED_H) $(QUIET_SP)cgcc -no-compile $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) \ -Wsparse-error \ $(SPARSE_FLAGS) $(SP_EXTRA_FLAGS) $< && \ >$@ .PHONY: sparse sparse: $(SP_OBJ) EXCEPT_HDRS := $(GENERATED_H) unicode-width.h compat/% xdiff/% $(UNIT_TEST_DIR)/clar/% $(UNIT_TEST_DIR)/clar/clar/% ifndef OPENSSL_SHA1 EXCEPT_HDRS += sha1/openssl.h endif ifndef OPENSSL_SHA256 EXCEPT_HDRS += sha256/openssl.h endif ifndef NETTLE_SHA256 EXCEPT_HDRS += sha256/nettle.h endif ifndef GCRYPT_SHA256 EXCEPT_HDRS += sha256/gcrypt.h endif CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(LIB_H)) HCO = $(patsubst %.h,%.hco,$(CHK_HDRS)) HCC = $(HCO:hco=hcc) %.hcc: %.h @echo '#include "git-compat-util.h"' >$@ @echo '#include "$<"' >>$@ $(HCO): %.hco: %.hcc $(GENERATED_H) FORCE $(QUIET_HDR)$(CC) $(ALL_CFLAGS) -o /dev/null -c -xc $< .PHONY: hdr-check $(HCO) hdr-check: $(HCO) .PHONY: style style: git clang-format --style file --diff --extensions c,h .PHONY: check check: @if sparse; \ then \ echo >&2 "Use 'make sparse' instead"; \ $(MAKE) --no-print-directory sparse; \ else \ echo >&2 "Did you mean 'make test'?"; \ exit 1; \ fi COCCI_GEN_ALL = .build/contrib/coccinelle/ALL.cocci COCCI_GLOB = $(wildcard contrib/coccinelle/*.cocci) COCCI_RULES_TRACKED = $(COCCI_GLOB:%=.build/%) COCCI_RULES_TRACKED_NO_PENDING = $(filter-out %.pending.cocci,$(COCCI_RULES_TRACKED)) COCCI_RULES = COCCI_RULES += 
$(COCCI_GEN_ALL) COCCI_RULES += $(COCCI_RULES_TRACKED) COCCI_NAMES = COCCI_NAMES += $(COCCI_RULES:.build/contrib/coccinelle/%.cocci=%) COCCICHECK_PENDING = $(filter %.pending.cocci,$(COCCI_RULES)) COCCICHECK = $(filter-out $(COCCICHECK_PENDING),$(COCCI_RULES)) COCCICHECK_PATCHES = $(COCCICHECK:%=%.patch) COCCICHECK_PATCHES_PENDING = $(COCCICHECK_PENDING:%=%.patch) COCCICHECK_PATCHES_INTREE = $(COCCICHECK_PATCHES:.build/%=%) COCCICHECK_PATCHES_PENDING_INTREE = $(COCCICHECK_PATCHES_PENDING:.build/%=%) # It's expensive to compute the many=many rules below, only eval them # on $(MAKECMDGOALS) that match these $(COCCI_RULES) COCCI_RULES_GLOB = COCCI_RULES_GLOB += cocci% COCCI_RULES_GLOB += .build/contrib/coccinelle/% COCCI_RULES_GLOB += $(COCCICHECK_PATCHES) COCCI_RULES_GLOB += $(COCCICHEC_PATCHES_PENDING) COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_INTREE) COCCI_RULES_GLOB += $(COCCICHECK_PATCHES_PENDING_INTREE) COCCI_GOALS = $(filter $(COCCI_RULES_GLOB),$(MAKECMDGOALS)) COCCI_TEST_RES = $(wildcard contrib/coccinelle/tests/*.res) $(COCCI_RULES_TRACKED): .build/% : % $(call mkdir_p_parent_template) $(QUIET_CP)cp $< $@ .build/contrib/coccinelle/FOUND_H_SOURCES: $(FOUND_H_SOURCES) $(call mkdir_p_parent_template) $(QUIET_GEN) >$@ $(COCCI_GEN_ALL): $(COCCI_RULES_TRACKED_NO_PENDING) $(call mkdir_p_parent_template) $(QUIET_SPATCH_CAT)cat $^ >$@ ifeq ($(COMPUTE_HEADER_DEPENDENCIES),no) SPATCH_USE_O_DEPENDENCIES = endif define cocci-rule ## Rule for .build/$(1).patch/$(2); Params: # $(1) = e.g. ".build/contrib/coccinelle/free.cocci" # $(2) = e.g. "grep.c" # $(3) = e.g. "grep.o" COCCI_$(1:.build/contrib/coccinelle/%.cocci=%) += $(1).d/$(2).patch $(1).d/$(2).patch: GIT-SPATCH-DEFINES $(1).d/$(2).patch: $(if $(and $(SPATCH_USE_O_DEPENDENCIES),$(wildcard $(3))),$(3),.build/contrib/coccinelle/FOUND_H_SOURCES) $(1).d/$(2).patch: $(1) $(1).d/$(2).patch: $(1).d/%.patch : % $$(call mkdir_p_parent_template) $$(QUIET_SPATCH)if ! 
$$(SPATCH) $$(SPATCH_FLAGS) \ $$(SPATCH_INCLUDE_FLAGS) \ --sp-file $(1) --patch . $$< \ >$$@ 2>$$@.log; \ then \ echo "ERROR when applying '$(1)' to '$$<'; '$$@.log' follows:"; \ cat $$@.log; \ exit 1; \ fi endef define cocci-matrix $(foreach s,$(COCCI_SOURCES),$(call cocci-rule,$(c),$(s),$(s:%.c=%.o))) endef ifdef COCCI_GOALS $(eval $(foreach c,$(COCCI_RULES),$(call cocci-matrix,$(c)))) endif define spatch-rule .build/contrib/coccinelle/$(1).cocci.patch: $$(COCCI_$(1)) $$(QUIET_SPATCH_CAT)cat $$^ >$$@ && \ if test -s $$@; \ then \ echo ' ' SPATCH result: $$@; \ fi contrib/coccinelle/$(1).cocci.patch: .build/contrib/coccinelle/$(1).cocci.patch $$(QUIET_CP)cp $$< $$@ endef ifdef COCCI_GOALS $(eval $(foreach n,$(COCCI_NAMES),$(call spatch-rule,$(n)))) endif COCCI_TEST_RES_GEN = $(addprefix .build/,$(COCCI_TEST_RES)) $(COCCI_TEST_RES_GEN): GIT-SPATCH-DEFINES $(COCCI_TEST_RES_GEN): .build/%.res : %.c $(COCCI_TEST_RES_GEN): .build/%.res : %.res ifdef SPATCH_CONCAT_COCCI $(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : $(COCCI_GEN_ALL) else $(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinelle/%.cocci endif $(call mkdir_p_parent_template) $(QUIET_SPATCH_TEST)$(SPATCH) $(SPATCH_TEST_FLAGS) \ --very-quiet --no-show-diff \ --sp-file $< -o $@ \ $(@:.build/%.res=%.c) && \ cmp $(@:.build/%=%) $@ || \ git -P diff --no-index $(@:.build/%=%) $@ 2>/dev/null; \ .PHONY: coccicheck-test coccicheck-test: $(COCCI_TEST_RES_GEN) coccicheck: coccicheck-test ifdef SPATCH_CONCAT_COCCI coccicheck: contrib/coccinelle/ALL.cocci.patch else coccicheck: $(COCCICHECK_PATCHES_INTREE) endif # See contrib/coccinelle/README coccicheck-pending: coccicheck-test coccicheck-pending: $(COCCICHECK_PATCHES_PENDING_INTREE) .PHONY: coccicheck coccicheck-pending # "Sub"-Makefiles, not really because they can't be run stand-alone, # only there to contain directory-specific rules and variables ## gitweb/Makefile inclusion: MAK_DIR_GITWEB = gitweb/ include gitweb/Makefile 
.PHONY: gitweb gitweb: $(MAK_DIR_GITWEB_ALL) ifndef NO_GITWEB all:: gitweb endif ### Installation rules ifneq ($(filter /%,$(firstword $(template_dir))),) template_instdir = $(template_dir) else template_instdir = $(prefix)/$(template_dir) endif export template_instdir ifneq ($(filter /%,$(firstword $(gitexecdir))),) gitexec_instdir = $(gitexecdir) else gitexec_instdir = $(prefix)/$(gitexecdir) endif gitexec_instdir_SQ = $(subst ','\'',$(gitexec_instdir)) export gitexec_instdir ifneq ($(filter /%,$(firstword $(mergetoolsdir))),) mergetools_instdir = $(mergetoolsdir) else mergetools_instdir = $(prefix)/$(mergetoolsdir) endif mergetools_instdir_SQ = $(subst ','\'',$(mergetools_instdir)) install_bindir_xprograms := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) install_bindir_programs := $(install_bindir_xprograms) $(BINDIR_PROGRAMS_NO_X) .PHONY: profile-install profile-fast-install profile-install: profile $(MAKE) install profile-fast-install: profile-fast $(MAKE) install INSTALL_STRIP = install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' $(INSTALL) $(INSTALL_STRIP) $(PROGRAMS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' $(INSTALL) $(SCRIPTS) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' $(INSTALL) -m 644 $(SCRIPT_LIB) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' $(INSTALL) $(INSTALL_STRIP) $(install_bindir_xprograms) '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) $(BINDIR_PROGRAMS_NO_X) '$(DESTDIR_SQ)$(bindir_SQ)' ifdef MSVC # We DO NOT install the individual foo.o.pdb files because they # have already been rolled up into the exe's pdb file. # We DO NOT have pdb files for the builtin commands (like git-status.exe) # because it is just a copy/hardlink of git.exe, rather than a unique binary. 
$(INSTALL) $(patsubst %.exe,%.pdb,$(filter-out $(BUILT_INS),$(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)))) '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) $(patsubst %.exe,%.pdb,$(filter-out $(BUILT_INS) $(REMOTE_CURL_ALIASES),$(PROGRAMS))) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' ifndef DEBUG $(INSTALL) $(vcpkg_rel_bin)/*.dll '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) $(vcpkg_rel_bin)/*.pdb '$(DESTDIR_SQ)$(bindir_SQ)' else $(INSTALL) $(vcpkg_dbg_bin)/*.dll '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) $(vcpkg_dbg_bin)/*.pdb '$(DESTDIR_SQ)$(bindir_SQ)' endif endif $(MAKE) -C templates DESTDIR='$(DESTDIR_SQ)' install $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mergetools_instdir_SQ)' $(INSTALL) -m 644 mergetools/* '$(DESTDIR_SQ)$(mergetools_instdir_SQ)' ifndef NO_GETTEXT $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(localedir_SQ)' (cd po/build/locale && $(TAR) cf - .) | \ (cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -) endif ifndef NO_PERL $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)' (cd perl/build/lib && $(TAR) cf - .) 
| \ (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -) endif ifndef NO_TCLTK $(MAKE) -C gitk-git install $(MAKE) -C git-gui gitexecdir='$(gitexec_instdir_SQ)' install endif ifneq (,$X) $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_COMMANDS_TO_INSTALL) $(OTHER_PROGRAMS))), test '$(DESTDIR_SQ)$(gitexec_instdir_SQ)/$p' -ef '$(DESTDIR_SQ)$(gitexec_instdir_SQ)/$p$X' || $(RM) '$(DESTDIR_SQ)$(gitexec_instdir_SQ)/$p';) endif bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \ execdir=$$(cd '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' && pwd) && \ destdir_from_execdir_SQ=$$(echo '$(gitexecdir_relative_SQ)' | sed -e 's|[^/][^/]*|..|g') && \ { test "$$bindir/" = "$$execdir/" || \ for p in $(OTHER_PROGRAMS) $(filter $(install_bindir_programs),$(ALL_PROGRAMS)); do \ $(RM) "$$execdir/$$p" && \ test -n "$(INSTALL_SYMLINKS)" && \ ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/$$p" "$$execdir/$$p" || \ { test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \ ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \ cp "$$bindir/$$p" "$$execdir/$$p" || exit; } \ done; \ } && \ for p in $(filter $(install_bindir_programs),$(BUILT_INS)); do \ $(RM) "$$bindir/$$p" && \ test -n "$(INSTALL_SYMLINKS)" && \ ln -s "git$X" "$$bindir/$$p" || \ { test -z "$(NO_INSTALL_HARDLINKS)" && \ ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \ ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \ cp "$$bindir/git$X" "$$bindir/$$p" || exit; }; \ done && \ for p in $(BUILT_INS); do \ $(RM) "$$execdir/$$p" && \ if test -z "$(SKIP_DASHED_BUILT_INS)"; \ then \ test -n "$(INSTALL_SYMLINKS)" && \ ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/git$X" "$$execdir/$$p" || \ { test -z "$(NO_INSTALL_HARDLINKS)" && \ ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \ ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \ cp "$$execdir/git$X" "$$execdir/$$p" || exit; }; \ fi \ done && \ remote_curl_aliases="$(REMOTE_CURL_ALIASES)" && \ for p in $$remote_curl_aliases; do \ $(RM) "$$execdir/$$p" 
&& \ test -n "$(INSTALL_SYMLINKS)" && \ ln -s "git-remote-http$X" "$$execdir/$$p" || \ { test -z "$(NO_INSTALL_HARDLINKS)" && \ ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \ ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \ cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \ done .PHONY: install-doc install-man install-man-perl install-html install-info install-pdf .PHONY: quick-install-doc quick-install-man quick-install-html install-doc: install-man-perl $(MAKE) -C Documentation install install-man: install-man-perl $(MAKE) -C Documentation install-man install-man-perl: man-perl $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3' (cd perl/build/man/man3 && $(TAR) cf - .) | \ (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -) install-html: $(MAKE) -C Documentation install-html install-info: $(MAKE) -C Documentation install-info install-pdf: $(MAKE) -C Documentation install-pdf quick-install-doc: $(MAKE) -C Documentation quick-install quick-install-man: $(MAKE) -C Documentation quick-install-man quick-install-html: $(MAKE) -C Documentation quick-install-html ### Maintainer's dist rules GIT_TARNAME = git-$(GIT_VERSION) GIT_ARCHIVE_EXTRA_FILES = \ --prefix=$(GIT_TARNAME)/ \ --add-file=configure \ --add-file=.dist-tmp-dir/version \ --prefix=$(GIT_TARNAME)/git-gui/ \ --add-file=.dist-tmp-dir/git-gui/version ifdef DC_SHA1_SUBMODULE GIT_ARCHIVE_EXTRA_FILES += \ --prefix=$(GIT_TARNAME)/sha1collisiondetection/ \ --add-file=sha1collisiondetection/LICENSE.txt \ --prefix=$(GIT_TARNAME)/sha1collisiondetection/lib/ \ --add-file=sha1collisiondetection/lib/sha1.c \ --add-file=sha1collisiondetection/lib/sha1.h \ --add-file=sha1collisiondetection/lib/ubc_check.c \ --add-file=sha1collisiondetection/lib/ubc_check.h endif dist: git-archive$(X) configure @$(RM) -r .dist-tmp-dir @mkdir .dist-tmp-dir @echo $(GIT_VERSION) > .dist-tmp-dir/version @$(MAKE) -C git-gui TARDIR=../.dist-tmp-dir/git-gui dist-version ./git-archive 
--format=tar \ $(GIT_ARCHIVE_EXTRA_FILES) \ --prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar @$(RM) -r .dist-tmp-dir gzip -f -9 $(GIT_TARNAME).tar rpm:: @echo >&2 "Use distro packaged sources to run rpmbuild" @false .PHONY: rpm ifneq ($(INCLUDE_DLLS_IN_ARTIFACTS),) OTHER_PROGRAMS += $(shell echo *.dll t/helper/*.dll t/unit-tests/bin/*.dll) endif artifacts-tar:: $(ALL_COMMANDS_TO_INSTALL) $(SCRIPT_LIB) $(OTHER_PROGRAMS) \ GIT-BUILD-OPTIONS $(TEST_PROGRAMS) $(test_bindir_programs) \ $(UNIT_TEST_PROGS) $(CLAR_TEST_PROG) $(MOFILES) $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) \ SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)' test -n "$(ARTIFACTS_DIRECTORY)" mkdir -p "$(ARTIFACTS_DIRECTORY)" $(TAR) czf "$(ARTIFACTS_DIRECTORY)/artifacts.tar.gz" $^ templates/blt/ .PHONY: artifacts-tar htmldocs = git-htmldocs-$(GIT_VERSION) manpages = git-manpages-$(GIT_VERSION) .PHONY: dist-doc distclean dist-doc: git$X $(RM) -r .doc-tmp-dir mkdir .doc-tmp-dir $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc ./git -C .doc-tmp-dir init ./git -C .doc-tmp-dir add . ./git -C .doc-tmp-dir commit -m htmldocs ./git -C .doc-tmp-dir archive --format=tar --prefix=./ HEAD^{tree} \ > $(htmldocs).tar gzip -n -9 -f $(htmldocs).tar : $(RM) -r .doc-tmp-dir mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7 $(MAKE) -C Documentation DESTDIR=./ \ man1dir=../.doc-tmp-dir/man1 \ man5dir=../.doc-tmp-dir/man5 \ man7dir=../.doc-tmp-dir/man7 \ install ./git -C .doc-tmp-dir init ./git -C .doc-tmp-dir add . 
./git -C .doc-tmp-dir commit -m manpages ./git -C .doc-tmp-dir archive --format=tar --prefix=./ HEAD^{tree} \ > $(manpages).tar gzip -n -9 -f $(manpages).tar $(RM) -r .doc-tmp-dir ### Cleaning rules distclean: clean $(RM) configure $(RM) config.log config.status config.cache $(RM) config.mak.autogen config.mak.append $(RM) -r autom4te.cache profile-clean: $(RM) $(addsuffix *.gcda,$(addprefix $(PROFILE_DIR)/, $(object_dirs))) $(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs))) cocciclean: $(RM) GIT-SPATCH-DEFINES $(RM) -r .build/contrib/coccinelle $(RM) contrib/coccinelle/*.cocci.patch clean: profile-clean coverage-clean cocciclean $(RM) -r .build $(UNIT_TEST_BIN) $(RM) GIT-TEST-SUITES $(RM) po/git.pot po/git-core.pot $(RM) git.rc git.res $(RM) $(OBJECTS) $(RM) headless-git.o $(RM) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) $(OTHER_PROGRAMS) $(RM) $(TEST_PROGRAMS) $(RM) $(FUZZ_PROGRAMS) $(RM) $(SP_OBJ) $(RM) $(HCC) $(RM) version-def.h $(RM) -r $(dep_dirs) $(compdb_dir) compile_commands.json $(RM) $(test_bindir_programs) $(RM) -r po/build/ $(RM) *.pyc *.pyo */*.pyc */*.pyo $(GENERATED_H) $(ETAGS_TARGET) tags cscope* $(RM) -r .dist-tmp-dir .doc-tmp-dir $(RM) $(GIT_TARNAME).tar.gz $(RM) $(htmldocs).tar.gz $(manpages).tar.gz $(MAKE) -C Documentation/ clean $(RM) Documentation/GIT-EXCLUDED-PROGRAMS ifndef NO_PERL $(RM) -r perl/build/ endif $(MAKE) -C templates/ clean $(MAKE) -C t/ clean ifndef NO_TCLTK $(MAKE) -C gitk-git clean $(MAKE) -C git-gui clean endif $(RM) GIT-VERSION-FILE GIT-CFLAGS GIT-LDFLAGS GIT-BUILD-OPTIONS $(RM) GIT-USER-AGENT GIT-PREFIX $(RM) GIT-SCRIPT-DEFINES GIT-PERL-DEFINES GIT-PERL-HEADER GIT-PYTHON-VARS ifdef MSVC $(RM) $(patsubst %.o,%.o.pdb,$(OBJECTS)) $(RM) headless-git.o.pdb $(RM) $(patsubst %.exe,%.pdb,$(OTHER_PROGRAMS)) $(RM) $(patsubst %.exe,%.ilk,$(OTHER_PROGRAMS)) $(RM) $(patsubst %.exe,%.iobj,$(OTHER_PROGRAMS)) $(RM) $(patsubst %.exe,%.ipdb,$(OTHER_PROGRAMS)) $(RM) 
$(patsubst %.exe,%.pdb,$(PROGRAMS)) $(RM) $(patsubst %.exe,%.ilk,$(PROGRAMS)) $(RM) $(patsubst %.exe,%.iobj,$(PROGRAMS)) $(RM) $(patsubst %.exe,%.ipdb,$(PROGRAMS)) $(RM) $(patsubst %.exe,%.pdb,$(TEST_PROGRAMS)) $(RM) $(patsubst %.exe,%.ilk,$(TEST_PROGRAMS)) $(RM) $(patsubst %.exe,%.iobj,$(TEST_PROGRAMS)) $(RM) $(patsubst %.exe,%.ipdb,$(TEST_PROGRAMS)) $(RM) compat/vcbuild/MSVC-DEFS-GEN endif .PHONY: all install profile-clean cocciclean clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell .PHONY: FORCE ### Check documentation # ALL_COMMANDS = $(ALL_COMMANDS_TO_INSTALL) $(SCRIPT_LIB) ALL_COMMANDS += git ALL_COMMANDS += git-citool ALL_COMMANDS += git-gui ALL_COMMANDS += gitk ALL_COMMANDS += gitweb ALL_COMMANDS += scalar .PHONY: check-docs check-docs:: $(MAKE) -C Documentation lint-docs ### Make sure built-ins do not have dups and listed in git.c # check-builtins:: ./check-builtins.sh ### Test suite coverage testing # .PHONY: coverage coverage-clean coverage-compile coverage-test coverage-report .PHONY: coverage-untested-functions cover_db cover_db_html .PHONY: coverage-clean-results coverage: $(MAKE) coverage-test $(MAKE) coverage-untested-functions object_dirs := $(sort $(dir $(OBJECTS))) coverage-clean-results: $(RM) $(addsuffix *.gcov,$(object_dirs)) $(RM) $(addsuffix *.gcda,$(object_dirs)) $(RM) coverage-untested-functions $(RM) -r cover_db/ $(RM) -r cover_db_html/ $(RM) coverage-test.made coverage-clean: coverage-clean-results $(RM) $(addsuffix *.gcno,$(object_dirs)) COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov GCOVFLAGS = --preserve-paths --branch-probabilities --all-blocks coverage-compile: $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all coverage-test: coverage-clean-results coverage-compile $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ DEFAULT_TEST_TARGET=test -j1 test touch coverage-test.made coverage-test.made: $(MAKE) 
coverage-test coverage-prove: coverage-clean-results coverage-compile $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \ DEFAULT_TEST_TARGET=prove GIT_PROVE_OPTS="$(GIT_PROVE_OPTS) -j1" \ -j1 test coverage-report: coverage-test.made $(QUIET_GCOV)for dir in $(object_dirs); do \ $(GCOV) $(GCOVFLAGS) --object-directory=$$dir $$dir*.c || exit; \ done coverage-untested-functions: coverage-report grep '^function.*called 0 ' *.c.gcov \ | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \ > coverage-untested-functions cover_db: coverage-report gcov2perl -db cover_db *.gcov cover_db_html: cover_db cover -report html -outputdir cover_db_html cover_db ### Fuzz testing # # Building fuzz targets generally requires a special set of compiler flags that # are not necessarily appropriate for general builds, and that vary greatly # depending on the compiler version used. # # An example command to build against libFuzzer from LLVM 11.0.0: # # make CC=clang FUZZ_CXX=clang++ \ # CFLAGS="-fsanitize=fuzzer-no-link,address" \ # LIB_FUZZING_ENGINE="-fsanitize=fuzzer,address" \ # fuzz-all # FUZZ_CXX ?= $(CC) FUZZ_CXXFLAGS ?= $(ALL_CFLAGS) .PHONY: fuzz-all fuzz-all: $(FUZZ_PROGRAMS) $(FUZZ_PROGRAMS): %: %.o oss-fuzz/dummy-cmd-main.o $(GITLIBS) GIT-LDFLAGS $(QUIET_LINK)$(FUZZ_CXX) $(FUZZ_CXXFLAGS) -o $@ $(ALL_LDFLAGS) \ -Wl,--allow-multiple-definition \ $(filter %.o,$^) $(filter %.a,$^) $(LIBS) $(LIB_FUZZING_ENGINE) $(UNIT_TEST_PROGS): $(UNIT_TEST_BIN)/%$X: $(UNIT_TEST_DIR)/%.o $(UNIT_TEST_OBJS) \ $(GITLIBS) GIT-LDFLAGS $(call mkdir_p_parent_template) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \ $(filter %.o,$^) $(filter %.a,$^) $(LIBS) GIT-TEST-SUITES: FORCE @FLAGS='$(CLAR_TEST_SUITES)'; \ if test x"$$FLAGS" != x"`cat GIT-TEST-SUITES 2>/dev/null`" ; then \ echo >&2 " * new test suites"; \ echo "$$FLAGS" >GIT-TEST-SUITES; \ fi $(UNIT_TEST_DIR)/clar-decls.h: $(patsubst %,$(UNIT_TEST_DIR)/%.c,$(CLAR_TEST_SUITES)) 
$(UNIT_TEST_DIR)/generate-clar-decls.sh GIT-TEST-SUITES $(QUIET_GEN)$(SHELL_PATH) $(UNIT_TEST_DIR)/generate-clar-decls.sh "$@" $(filter %.c,$^) $(UNIT_TEST_DIR)/clar.suite: $(UNIT_TEST_DIR)/clar-decls.h $(UNIT_TEST_DIR)/generate-clar-suites.sh $(QUIET_GEN)$(SHELL_PATH) $(UNIT_TEST_DIR)/generate-clar-suites.sh $< $(UNIT_TEST_DIR)/clar.suite $(UNIT_TEST_DIR)/clar/clar.o: $(UNIT_TEST_DIR)/clar.suite $(CLAR_TEST_OBJS): $(UNIT_TEST_DIR)/clar-decls.h $(CLAR_TEST_OBJS): EXTRA_CPPFLAGS = -I$(UNIT_TEST_DIR) $(CLAR_TEST_PROG): $(UNIT_TEST_DIR)/clar.suite $(CLAR_TEST_OBJS) $(GITLIBS) GIT-LDFLAGS $(call mkdir_p_parent_template) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) .PHONY: build-unit-tests unit-tests build-unit-tests: $(UNIT_TEST_PROGS) $(CLAR_TEST_PROG) unit-tests: $(UNIT_TEST_PROGS) $(CLAR_TEST_PROG) t/helper/test-tool$X $(MAKE) -C t/ unit-tests git-cinnabar-0.7.0/git-core/abspath.c000064400000000000000000000207241046102023000154720ustar 00000000000000#include "git-compat-util.h" #include "abspath.h" #include "strbuf.h" /* * Do not use this for inspecting *tracked* content. When path is a * symlink to a directory, we do not want to say it is a directory when * dealing with tracked content in the working tree. 
*/ int is_directory(const char *path) { struct stat st; return (!stat(path, &st) && S_ISDIR(st.st_mode)); } /* removes the last path component from 'path' except if 'path' is root */ static void strip_last_component(struct strbuf *path) { size_t offset = offset_1st_component(path->buf); size_t len = path->len; /* Find start of the last component */ while (offset < len && !is_dir_sep(path->buf[len - 1])) len--; /* Skip sequences of multiple path-separators */ while (offset < len && is_dir_sep(path->buf[len - 1])) len--; strbuf_setlen(path, len); } /* get (and remove) the next component in 'remaining' and place it in 'next' */ static void get_next_component(struct strbuf *next, struct strbuf *remaining) { char *start = NULL; char *end = NULL; strbuf_reset(next); /* look for the next component */ /* Skip sequences of multiple path-separators */ for (start = remaining->buf; is_dir_sep(*start); start++) ; /* nothing */ /* Find end of the path component */ for (end = start; *end && !is_dir_sep(*end); end++) ; /* nothing */ strbuf_add(next, start, end - start); /* remove the component from 'remaining' */ strbuf_remove(remaining, 0, end - remaining->buf); } /* copies root part from remaining to resolved, canonicalizing it on the way */ static void get_root_part(struct strbuf *resolved, struct strbuf *remaining) { int offset = offset_1st_component(remaining->buf); strbuf_reset(resolved); strbuf_add(resolved, remaining->buf, offset); #ifdef GIT_WINDOWS_NATIVE convert_slashes(resolved->buf); #endif strbuf_remove(remaining, 0, offset); } /* We allow "recursive" symbolic links. Only within reason, though. */ #ifndef MAXSYMLINKS #define MAXSYMLINKS 32 #endif /* * If set, any number of trailing components may be missing; otherwise, only one * may be. */ #define REALPATH_MANY_MISSING (1 << 0) /* Should we die if there's an error? 
*/ #define REALPATH_DIE_ON_ERROR (1 << 1) static char *strbuf_realpath_1(struct strbuf *resolved, const char *path, int flags) { struct strbuf remaining = STRBUF_INIT; struct strbuf next = STRBUF_INIT; struct strbuf symlink = STRBUF_INIT; char *retval = NULL; int num_symlinks = 0; struct stat st; if (!*path) { if (flags & REALPATH_DIE_ON_ERROR) die("The empty string is not a valid path"); else goto error_out; } if (platform_strbuf_realpath(resolved, path)) return resolved->buf; strbuf_addstr(&remaining, path); get_root_part(resolved, &remaining); if (!resolved->len) { /* relative path; can use CWD as the initial resolved path */ if (strbuf_getcwd(resolved)) { if (flags & REALPATH_DIE_ON_ERROR) die_errno("unable to get current working directory"); else goto error_out; } } /* Iterate over the remaining path components */ while (remaining.len > 0) { get_next_component(&next, &remaining); if (next.len == 0) { continue; /* empty component */ } else if (next.len == 1 && !strcmp(next.buf, ".")) { continue; /* '.' component */ } else if (next.len == 2 && !strcmp(next.buf, "..")) { /* '..' 
component; strip the last path component */ strip_last_component(resolved); continue; } /* append the next component and resolve resultant path */ if (!is_dir_sep(resolved->buf[resolved->len - 1])) strbuf_addch(resolved, '/'); strbuf_addbuf(resolved, &next); if (lstat(resolved->buf, &st)) { /* error out unless this was the last component */ if (errno != ENOENT || (!(flags & REALPATH_MANY_MISSING) && remaining.len)) { if (flags & REALPATH_DIE_ON_ERROR) die_errno("Invalid path '%s'", resolved->buf); else goto error_out; } } else if (S_ISLNK(st.st_mode)) { ssize_t len; strbuf_reset(&symlink); if (num_symlinks++ > MAXSYMLINKS) { errno = ELOOP; if (flags & REALPATH_DIE_ON_ERROR) die("More than %d nested symlinks " "on path '%s'", MAXSYMLINKS, path); else goto error_out; } len = strbuf_readlink(&symlink, resolved->buf, st.st_size); if (len < 0) { if (flags & REALPATH_DIE_ON_ERROR) die_errno("Invalid symlink '%s'", resolved->buf); else goto error_out; } if (is_absolute_path(symlink.buf)) { /* absolute symlink; set resolved to root */ get_root_part(resolved, &symlink); } else { /* * relative symlink * strip off the last component since it will * be replaced with the contents of the symlink */ strip_last_component(resolved); } /* * if there are still remaining components to resolve * then append them to symlink */ if (remaining.len) { strbuf_addch(&symlink, '/'); strbuf_addbuf(&symlink, &remaining); } /* * use the symlink as the remaining components that * need to be resolved */ strbuf_swap(&symlink, &remaining); } } retval = resolved->buf; error_out: strbuf_release(&remaining); strbuf_release(&next); strbuf_release(&symlink); if (!retval) strbuf_reset(resolved); return retval; } /* * Return the real path (i.e., absolute path, with symlinks resolved * and extra slashes removed) equivalent to the specified path. (If * you want an absolute path but don't mind links, use * absolute_path().) Places the resolved realpath in the provided strbuf. 
* * The directory part of path (i.e., everything up to the last * dir_sep) must denote a valid, existing directory, but the last * component need not exist. If die_on_error is set, then die with an * informative error message if there is a problem. Otherwise, return * NULL on errors (without generating any output). */ char *strbuf_realpath(struct strbuf *resolved, const char *path, int die_on_error) { return strbuf_realpath_1(resolved, path, die_on_error ? REALPATH_DIE_ON_ERROR : 0); } /* * Just like strbuf_realpath, but allows an arbitrary number of path * components to be missing. */ char *strbuf_realpath_forgiving(struct strbuf *resolved, const char *path, int die_on_error) { return strbuf_realpath_1(resolved, path, ((die_on_error ? REALPATH_DIE_ON_ERROR : 0) | REALPATH_MANY_MISSING)); } char *real_pathdup(const char *path, int die_on_error) { struct strbuf realpath = STRBUF_INIT; char *retval = NULL; if (strbuf_realpath(&realpath, path, die_on_error)) retval = strbuf_detach(&realpath, NULL); strbuf_release(&realpath); return retval; } /* * Use this to get an absolute path from a relative one. If you want * to resolve links, you should use strbuf_realpath. */ const char *absolute_path(const char *path) { static struct strbuf sb = STRBUF_INIT; strbuf_reset(&sb); strbuf_add_absolute_path(&sb, path); return sb.buf; } char *absolute_pathdup(const char *path) { struct strbuf sb = STRBUF_INIT; strbuf_add_absolute_path(&sb, path); return strbuf_detach(&sb, NULL); } char *prefix_filename(const char *pfx, const char *arg) { struct strbuf path = STRBUF_INIT; size_t pfx_len = pfx ? 
strlen(pfx) : 0; if (!pfx_len) ; /* nothing to prefix */ else if (is_absolute_path(arg)) pfx_len = 0; else strbuf_add(&path, pfx, pfx_len); strbuf_addstr(&path, arg); #ifdef GIT_WINDOWS_NATIVE convert_slashes(path.buf + pfx_len); #endif return strbuf_detach(&path, NULL); } char *prefix_filename_except_for_dash(const char *pfx, const char *arg) { if (!strcmp(arg, "-")) return xstrdup(arg); return prefix_filename(pfx, arg); } void strbuf_add_absolute_path(struct strbuf *sb, const char *path) { if (!*path) die("The empty string is not a valid path"); if (!is_absolute_path(path)) { struct stat cwd_stat, pwd_stat; size_t orig_len = sb->len; char *cwd = xgetcwd(); char *pwd = getenv("PWD"); if (pwd && strcmp(pwd, cwd) && !stat(cwd, &cwd_stat) && (cwd_stat.st_dev || cwd_stat.st_ino) && !stat(pwd, &pwd_stat) && pwd_stat.st_dev == cwd_stat.st_dev && pwd_stat.st_ino == cwd_stat.st_ino) strbuf_addstr(sb, pwd); else strbuf_addstr(sb, cwd); if (sb->len > orig_len && !is_dir_sep(sb->buf[sb->len - 1])) strbuf_addch(sb, '/'); free(cwd); } strbuf_addstr(sb, path); } void strbuf_add_real_path(struct strbuf *sb, const char *path) { if (sb->len) { struct strbuf resolved = STRBUF_INIT; strbuf_realpath(&resolved, path, 1); strbuf_addbuf(sb, &resolved); strbuf_release(&resolved); } else strbuf_realpath(sb, path, 1); } git-cinnabar-0.7.0/git-core/abspath.h000064400000000000000000000034511046102023000154750ustar 00000000000000#ifndef ABSPATH_H #define ABSPATH_H int is_directory(const char *); char *strbuf_realpath(struct strbuf *resolved, const char *path, int die_on_error); char *strbuf_realpath_forgiving(struct strbuf *resolved, const char *path, int die_on_error); char *real_pathdup(const char *path, int die_on_error); const char *absolute_path(const char *path); char *absolute_pathdup(const char *path); /* * Concatenate "prefix" (if len is non-zero) and "path", with no * connecting characters (so "prefix" should end with a "/"). 
* Unlike prefix_path, this should be used if the named file does * not have to interact with index entry; i.e. name of a random file * on the filesystem. * * The return value is always a newly allocated string (even if the * prefix was empty). */ char *prefix_filename(const char *prefix, const char *path); /* Likewise, but path=="-" always yields "-" */ char *prefix_filename_except_for_dash(const char *prefix, const char *path); static inline int is_absolute_path(const char *path) { return is_dir_sep(path[0]) || has_dos_drive_prefix(path); } /** * Add a path to a buffer, converting a relative path to an * absolute one in the process. Symbolic links are not * resolved. */ void strbuf_add_absolute_path(struct strbuf *sb, const char *path); /** * Canonize `path` (make it absolute, resolve symlinks, remove extra * slashes) and append it to `sb`. Die with an informative error * message if there is a problem. * * The directory part of `path` (i.e., everything up to the last * dir_sep) must denote a valid, existing directory, but the last * component need not exist. * * Callers that don't mind links should use the more lightweight * strbuf_add_absolute_path() instead. 
*/ void strbuf_add_real_path(struct strbuf *sb, const char *path); #endif /* ABSPATH_H */ git-cinnabar-0.7.0/git-core/add-interactive.c000064400000000000000000000761361046102023000171230ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "add-interactive.h" #include "color.h" #include "config.h" #include "diffcore.h" #include "gettext.h" #include "hash.h" #include "hex.h" #include "preload-index.h" #include "read-cache-ll.h" #include "repository.h" #include "revision.h" #include "refs.h" #include "string-list.h" #include "lockfile.h" #include "dir.h" #include "run-command.h" #include "prompt.h" #include "tree.h" static void init_color(struct repository *r, struct add_i_state *s, const char *section_and_slot, char *dst, const char *default_color) { char *key = xstrfmt("color.%s", section_and_slot); const char *value; if (!s->use_color) dst[0] = '\0'; else if (repo_config_get_value(r, key, &value) || color_parse(value, dst)) strlcpy(dst, default_color, COLOR_MAXLEN); free(key); } void init_add_i_state(struct add_i_state *s, struct repository *r) { const char *value; s->r = r; if (repo_config_get_value(r, "color.interactive", &value)) s->use_color = -1; else s->use_color = git_config_colorbool("color.interactive", value); s->use_color = want_color(s->use_color); init_color(r, s, "interactive.header", s->header_color, GIT_COLOR_BOLD); init_color(r, s, "interactive.help", s->help_color, GIT_COLOR_BOLD_RED); init_color(r, s, "interactive.prompt", s->prompt_color, GIT_COLOR_BOLD_BLUE); init_color(r, s, "interactive.error", s->error_color, GIT_COLOR_BOLD_RED); init_color(r, s, "diff.frag", s->fraginfo_color, diff_get_color(s->use_color, DIFF_FRAGINFO)); init_color(r, s, "diff.context", s->context_color, "fall back"); if (!strcmp(s->context_color, "fall back")) init_color(r, s, "diff.plain", s->context_color, diff_get_color(s->use_color, DIFF_CONTEXT)); init_color(r, s, "diff.old", 
s->file_old_color, diff_get_color(s->use_color, DIFF_FILE_OLD)); init_color(r, s, "diff.new", s->file_new_color, diff_get_color(s->use_color, DIFF_FILE_NEW)); strlcpy(s->reset_color, s->use_color ? GIT_COLOR_RESET : "", COLOR_MAXLEN); FREE_AND_NULL(s->interactive_diff_filter); git_config_get_string("interactive.difffilter", &s->interactive_diff_filter); FREE_AND_NULL(s->interactive_diff_algorithm); git_config_get_string("diff.algorithm", &s->interactive_diff_algorithm); git_config_get_bool("interactive.singlekey", &s->use_single_key); if (s->use_single_key) setbuf(stdin, NULL); } void clear_add_i_state(struct add_i_state *s) { FREE_AND_NULL(s->interactive_diff_filter); FREE_AND_NULL(s->interactive_diff_algorithm); memset(s, 0, sizeof(*s)); s->use_color = -1; } /* * A "prefix item list" is a list of items that are identified by a string, and * a unique prefix (if any) is determined for each item. * * It is implemented in the form of a pair of `string_list`s, the first one * duplicating the strings, with the `util` field pointing at a structure whose * first field must be `size_t prefix_length`. * * That `prefix_length` field will be computed by `find_unique_prefixes()`; It * will be set to zero if no valid, unique prefix could be found. * * The second `string_list` is called `sorted` and does _not_ duplicate the * strings but simply reuses the first one's, with the `util` field pointing at * the `string_item_list` of the first `string_list`. It will be populated and * sorted by `find_unique_prefixes()`. 
*/ struct prefix_item_list { struct string_list items; struct string_list sorted; int *selected; /* for multi-selections */ size_t min_length, max_length; }; #define PREFIX_ITEM_LIST_INIT { \ .items = STRING_LIST_INIT_DUP, \ .sorted = STRING_LIST_INIT_NODUP, \ .min_length = 1, \ .max_length = 4, \ } static void prefix_item_list_clear(struct prefix_item_list *list) { string_list_clear(&list->items, 1); string_list_clear(&list->sorted, 0); FREE_AND_NULL(list->selected); } static void extend_prefix_length(struct string_list_item *p, const char *other_string, size_t max_length) { size_t *len = p->util; if (!*len || memcmp(p->string, other_string, *len)) return; for (;;) { char c = p->string[*len]; /* * Is `p` a strict prefix of `other`? Or have we exhausted the * maximal length of the prefix? Or is the current character a * multi-byte UTF-8 one? If so, there is no valid, unique * prefix. */ if (!c || ++*len > max_length || !isascii(c)) { *len = 0; break; } if (c != other_string[*len - 1]) break; } } static void find_unique_prefixes(struct prefix_item_list *list) { size_t i; if (list->sorted.nr == list->items.nr) return; string_list_clear(&list->sorted, 0); /* Avoid reallocating incrementally */ list->sorted.items = xmalloc(st_mult(sizeof(*list->sorted.items), list->items.nr)); list->sorted.nr = list->sorted.alloc = list->items.nr; for (i = 0; i < list->items.nr; i++) { list->sorted.items[i].string = list->items.items[i].string; list->sorted.items[i].util = list->items.items + i; } string_list_sort(&list->sorted); for (i = 0; i < list->sorted.nr; i++) { struct string_list_item *sorted_item = list->sorted.items + i; struct string_list_item *item = sorted_item->util; size_t *len = item->util; *len = 0; while (*len < list->min_length) { char c = item->string[(*len)++]; if (!c || !isascii(c)) { *len = 0; break; } } if (i > 0) extend_prefix_length(item, sorted_item[-1].string, list->max_length); if (i + 1 < list->sorted.nr) extend_prefix_length(item, sorted_item[1].string, 
list->max_length); } } static ssize_t find_unique(const char *string, struct prefix_item_list *list) { int index = string_list_find_insert_index(&list->sorted, string, 1); struct string_list_item *item; if (list->items.nr != list->sorted.nr) BUG("prefix_item_list in inconsistent state (%"PRIuMAX " vs %"PRIuMAX")", (uintmax_t)list->items.nr, (uintmax_t)list->sorted.nr); if (index < 0) item = list->sorted.items[-1 - index].util; else if (index > 0 && starts_with(list->sorted.items[index - 1].string, string)) return -1; else if (index + 1 < list->sorted.nr && starts_with(list->sorted.items[index + 1].string, string)) return -1; else if (index < list->sorted.nr && starts_with(list->sorted.items[index].string, string)) item = list->sorted.items[index].util; else return -1; return item - list->items.items; } struct list_options { int columns; const char *header; void (*print_item)(int i, int selected, struct string_list_item *item, void *print_item_data); void *print_item_data; }; static void list(struct add_i_state *s, struct string_list *list, int *selected, struct list_options *opts) { int i, last_lf = 0; if (!list->nr) return; if (opts->header) color_fprintf_ln(stdout, s->header_color, "%s", opts->header); for (i = 0; i < list->nr; i++) { opts->print_item(i, selected ? selected[i] : 0, list->items + i, opts->print_item_data); if ((opts->columns) && ((i + 1) % (opts->columns))) { putchar('\t'); last_lf = 0; } else { putchar('\n'); last_lf = 1; } } if (!last_lf) putchar('\n'); } struct list_and_choose_options { struct list_options list_opts; const char *prompt; enum { SINGLETON = (1<<0), IMMEDIATE = (1<<1), } flags; void (*print_help)(struct add_i_state *s); }; #define LIST_AND_CHOOSE_ERROR (-1) #define LIST_AND_CHOOSE_QUIT (-2) /* * Returns the selected index in singleton mode, the number of selected items * otherwise. * * If an error occurred, returns `LIST_AND_CHOOSE_ERROR`. Upon EOF, * `LIST_AND_CHOOSE_QUIT` is returned. 
*/ static ssize_t list_and_choose(struct add_i_state *s, struct prefix_item_list *items, struct list_and_choose_options *opts) { int singleton = opts->flags & SINGLETON; int immediate = opts->flags & IMMEDIATE; struct strbuf input = STRBUF_INIT; ssize_t res = singleton ? LIST_AND_CHOOSE_ERROR : 0; if (!singleton) { free(items->selected); CALLOC_ARRAY(items->selected, items->items.nr); } if (singleton && !immediate) BUG("singleton requires immediate"); find_unique_prefixes(items); for (;;) { char *p; strbuf_reset(&input); list(s, &items->items, items->selected, &opts->list_opts); color_fprintf(stdout, s->prompt_color, "%s", opts->prompt); fputs(singleton ? "> " : ">> ", stdout); fflush(stdout); if (git_read_line_interactively(&input) == EOF) { putchar('\n'); if (immediate) res = LIST_AND_CHOOSE_QUIT; break; } if (!input.len) break; if (!strcmp(input.buf, "?")) { opts->print_help(s); continue; } p = input.buf; for (;;) { size_t sep = strcspn(p, " \t\r\n,"); int choose = 1; /* `from` is inclusive, `to` is exclusive */ ssize_t from = -1, to = -1; if (!sep) { if (!*p) break; p++; continue; } /* Input that begins with '-'; de-select */ if (*p == '-') { choose = 0; p++; sep--; } if (sep == 1 && *p == '*') { from = 0; to = items->items.nr; } else if (isdigit(*p)) { char *endp; /* * A range can be specified like 5-7 or 5-. * * Note: `from` is 0-based while the user input * is 1-based, hence we have to decrement by * one. We do not have to decrement `to` even * if it is 0-based because it is an exclusive * boundary. */ from = strtoul(p, &endp, 10) - 1; if (endp == p + sep) to = from + 1; else if (*endp == '-') { if (isdigit(*(++endp))) to = strtoul(endp, &endp, 10); else to = items->items.nr; /* extra characters after the range? 
*/ if (endp != p + sep) from = -1; } } if (p[sep]) p[sep++] = '\0'; if (from < 0) { from = find_unique(p, items); if (from >= 0) to = from + 1; } if (from < 0 || from >= items->items.nr || (singleton && from + 1 != to)) { color_fprintf_ln(stderr, s->error_color, _("Huh (%s)?"), p); break; } else if (singleton) { res = from; break; } if (to > items->items.nr) to = items->items.nr; for (; from < to; from++) if (items->selected[from] != choose) { items->selected[from] = choose; res += choose ? +1 : -1; } p += sep; } if ((immediate && res != LIST_AND_CHOOSE_ERROR) || !strcmp(input.buf, "*")) break; } strbuf_release(&input); return res; } struct adddel { uintmax_t add, del; unsigned seen:1, unmerged:1, binary:1; }; struct file_item { size_t prefix_length; struct adddel index, worktree; }; static void add_file_item(struct string_list *files, const char *name) { struct file_item *item = xcalloc(1, sizeof(*item)); string_list_append(files, name)->util = item; } struct pathname_entry { struct hashmap_entry ent; const char *name; struct file_item *item; }; static int pathname_entry_cmp(const void *cmp_data UNUSED, const struct hashmap_entry *he1, const struct hashmap_entry *he2, const void *name) { const struct pathname_entry *e1 = container_of(he1, const struct pathname_entry, ent); const struct pathname_entry *e2 = container_of(he2, const struct pathname_entry, ent); return strcmp(e1->name, name ? 
(const char *)name : e2->name); } struct collection_status { enum { FROM_WORKTREE = 0, FROM_INDEX = 1 } mode; const char *reference; unsigned skip_unseen:1; size_t unmerged_count, binary_count; struct string_list *files; struct hashmap file_map; }; static void collect_changes_cb(struct diff_queue_struct *q, struct diff_options *options, void *data) { struct collection_status *s = data; struct diffstat_t stat = { 0 }; int i; if (!q->nr) return; compute_diffstat(options, &stat, q); for (i = 0; i < stat.nr; i++) { const char *name = stat.files[i]->name; int hash = strhash(name); struct pathname_entry *entry; struct file_item *file_item; struct adddel *adddel, *other_adddel; entry = hashmap_get_entry_from_hash(&s->file_map, hash, name, struct pathname_entry, ent); if (!entry) { if (s->skip_unseen) continue; add_file_item(s->files, name); CALLOC_ARRAY(entry, 1); hashmap_entry_init(&entry->ent, hash); entry->name = s->files->items[s->files->nr - 1].string; entry->item = s->files->items[s->files->nr - 1].util; hashmap_add(&s->file_map, &entry->ent); } file_item = entry->item; adddel = s->mode == FROM_INDEX ? &file_item->index : &file_item->worktree; other_adddel = s->mode == FROM_INDEX ? 
&file_item->worktree : &file_item->index; adddel->seen = 1; adddel->add = stat.files[i]->added; adddel->del = stat.files[i]->deleted; if (stat.files[i]->is_binary) { if (!other_adddel->binary) s->binary_count++; adddel->binary = 1; } if (stat.files[i]->is_unmerged) { if (!other_adddel->unmerged) s->unmerged_count++; adddel->unmerged = 1; } } free_diffstat_info(&stat); } enum modified_files_filter { NO_FILTER = 0, WORKTREE_ONLY = 1, INDEX_ONLY = 2, }; static int get_modified_files(struct repository *r, enum modified_files_filter filter, struct prefix_item_list *files, const struct pathspec *ps, size_t *unmerged_count, size_t *binary_count) { struct object_id head_oid; int is_initial = !refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", RESOLVE_REF_READING, &head_oid, NULL); struct collection_status s = { 0 }; int i; discard_index(r->index); if (repo_read_index_preload(r, ps, 0) < 0) return error(_("could not read index")); prefix_item_list_clear(files); s.files = &files->items; hashmap_init(&s.file_map, pathname_entry_cmp, NULL, 0); for (i = 0; i < 2; i++) { struct rev_info rev; struct setup_revision_opt opt = { 0 }; if (filter == INDEX_ONLY) s.mode = (i == 0) ? FROM_INDEX : FROM_WORKTREE; else s.mode = (i == 0) ? FROM_WORKTREE : FROM_INDEX; s.skip_unseen = filter && i; opt.def = is_initial ? 
empty_tree_oid_hex(the_repository->hash_algo) : oid_to_hex(&head_oid); repo_init_revisions(r, &rev, NULL); setup_revisions(0, NULL, &rev, &opt); rev.diffopt.output_format = DIFF_FORMAT_CALLBACK; rev.diffopt.format_callback = collect_changes_cb; rev.diffopt.format_callback_data = &s; if (ps) copy_pathspec(&rev.prune_data, ps); if (s.mode == FROM_INDEX) run_diff_index(&rev, DIFF_INDEX_CACHED); else { rev.diffopt.flags.ignore_dirty_submodules = 1; run_diff_files(&rev, 0); } release_revisions(&rev); } hashmap_clear_and_free(&s.file_map, struct pathname_entry, ent); if (unmerged_count) *unmerged_count = s.unmerged_count; if (binary_count) *binary_count = s.binary_count; /* While the diffs are ordered already, we ran *two* diffs... */ string_list_sort(&files->items); return 0; } static void render_adddel(struct strbuf *buf, struct adddel *ad, const char *no_changes) { if (ad->binary) strbuf_addstr(buf, _("binary")); else if (ad->seen) strbuf_addf(buf, "+%"PRIuMAX"/-%"PRIuMAX, (uintmax_t)ad->add, (uintmax_t)ad->del); else strbuf_addstr(buf, no_changes); } /* filters out prefixes which have special meaning to list_and_choose() */ static int is_valid_prefix(const char *prefix, size_t prefix_len) { return prefix_len && prefix && /* * We expect `prefix` to be NUL terminated, therefore this * `strcspn()` call is okay, even if it might do much more * work than strictly necessary. 
*/ strcspn(prefix, " \t\r\n,") >= prefix_len && /* separators */ *prefix != '-' && /* deselection */ !isdigit(*prefix) && /* selection */ (prefix_len != 1 || (*prefix != '*' && /* "all" wildcard */ *prefix != '?')); /* prompt help */ } struct print_file_item_data { const char *modified_fmt, *color, *reset; struct strbuf buf, name, index, worktree; unsigned only_names:1; }; static void print_file_item(int i, int selected, struct string_list_item *item, void *print_file_item_data) { struct file_item *c = item->util; struct print_file_item_data *d = print_file_item_data; const char *highlighted = NULL; strbuf_reset(&d->index); strbuf_reset(&d->worktree); strbuf_reset(&d->buf); /* Format the item with the prefix highlighted. */ if (c->prefix_length > 0 && is_valid_prefix(item->string, c->prefix_length)) { strbuf_reset(&d->name); strbuf_addf(&d->name, "%s%.*s%s%s", d->color, (int)c->prefix_length, item->string, d->reset, item->string + c->prefix_length); highlighted = d->name.buf; } if (d->only_names) { printf("%c%2d: %s", selected ? '*' : ' ', i + 1, highlighted ? highlighted : item->string); return; } render_adddel(&d->worktree, &c->worktree, _("nothing")); render_adddel(&d->index, &c->index, _("unchanged")); strbuf_addf(&d->buf, d->modified_fmt, d->index.buf, d->worktree.buf, highlighted ? highlighted : item->string); printf("%c%2d: %s", selected ? 
'*' : ' ', i + 1, d->buf.buf); } static int run_status(struct add_i_state *s, const struct pathspec *ps, struct prefix_item_list *files, struct list_and_choose_options *opts) { if (get_modified_files(s->r, NO_FILTER, files, ps, NULL, NULL) < 0) return -1; list(s, &files->items, NULL, &opts->list_opts); putchar('\n'); return 0; } static int run_update(struct add_i_state *s, const struct pathspec *ps, struct prefix_item_list *files, struct list_and_choose_options *opts) { int res = 0, fd; size_t count, i; struct lock_file index_lock; if (get_modified_files(s->r, WORKTREE_ONLY, files, ps, NULL, NULL) < 0) return -1; if (!files->items.nr) { putchar('\n'); return 0; } opts->prompt = N_("Update"); count = list_and_choose(s, files, opts); if (count <= 0) { putchar('\n'); return 0; } fd = repo_hold_locked_index(s->r, &index_lock, LOCK_REPORT_ON_ERROR); if (fd < 0) { putchar('\n'); return -1; } for (i = 0; i < files->items.nr; i++) { const char *name = files->items.items[i].string; struct stat st; if (!files->selected[i]) continue; if (lstat(name, &st) && is_missing_file_error(errno)) { if (remove_file_from_index(s->r->index, name) < 0) { res = error(_("could not stage '%s'"), name); break; } } else if (add_file_to_index(s->r->index, name, 0) < 0) { res = error(_("could not stage '%s'"), name); break; } } if (!res && write_locked_index(s->r->index, &index_lock, COMMIT_LOCK) < 0) res = error(_("could not write index")); if (!res) printf(Q_("updated %d path\n", "updated %d paths\n", count), (int)count); putchar('\n'); return res; } static void revert_from_diff(struct diff_queue_struct *q, struct diff_options *opt, void *data UNUSED) { int i, add_flags = ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE; for (i = 0; i < q->nr; i++) { struct diff_filespec *one = q->queue[i]->one; struct cache_entry *ce; if (!(one->mode && !is_null_oid(&one->oid))) { remove_file_from_index(opt->repo->index, one->path); printf(_("note: %s is untracked now.\n"), one->path); } else { ce = 
make_cache_entry(opt->repo->index, one->mode, &one->oid, one->path, 0, 0); if (!ce) die(_("make_cache_entry failed for path '%s'"), one->path); add_index_entry(opt->repo->index, ce, add_flags); } } } static int run_revert(struct add_i_state *s, const struct pathspec *ps, struct prefix_item_list *files, struct list_and_choose_options *opts) { int res = 0, fd; size_t count, i, j; struct object_id oid; int is_initial = !refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", RESOLVE_REF_READING, &oid, NULL); struct lock_file index_lock; const char **paths; struct tree *tree; struct diff_options diffopt = { NULL }; if (get_modified_files(s->r, INDEX_ONLY, files, ps, NULL, NULL) < 0) return -1; if (!files->items.nr) { putchar('\n'); return 0; } opts->prompt = N_("Revert"); count = list_and_choose(s, files, opts); if (count <= 0) goto finish_revert; fd = repo_hold_locked_index(s->r, &index_lock, LOCK_REPORT_ON_ERROR); if (fd < 0) { res = -1; goto finish_revert; } if (is_initial) oidcpy(&oid, s->r->hash_algo->empty_tree); else { tree = parse_tree_indirect(&oid); if (!tree) { res = error(_("Could not parse HEAD^{tree}")); goto finish_revert; } oidcpy(&oid, &tree->object.oid); } ALLOC_ARRAY(paths, count + 1); for (i = j = 0; i < files->items.nr; i++) if (files->selected[i]) paths[j++] = files->items.items[i].string; paths[j] = NULL; parse_pathspec(&diffopt.pathspec, 0, PATHSPEC_PREFER_FULL | PATHSPEC_LITERAL_PATH, NULL, paths); diffopt.output_format = DIFF_FORMAT_CALLBACK; diffopt.format_callback = revert_from_diff; diffopt.flags.override_submodule_config = 1; diffopt.repo = s->r; if (do_diff_cache(&oid, &diffopt)) { diff_free(&diffopt); res = -1; } else { diffcore_std(&diffopt); diff_flush(&diffopt); } free(paths); if (!res && write_locked_index(s->r->index, &index_lock, COMMIT_LOCK) < 0) res = -1; else res = repo_refresh_and_write_index(s->r, REFRESH_QUIET, 0, 1, NULL, NULL, NULL); if (!res) printf(Q_("reverted %d path\n", "reverted %d paths\n", count), 
(int)count); finish_revert: putchar('\n'); return res; } static int get_untracked_files(struct repository *r, struct prefix_item_list *files, const struct pathspec *ps) { struct dir_struct dir = { 0 }; size_t i; struct strbuf buf = STRBUF_INIT; if (repo_read_index(r) < 0) return error(_("could not read index")); prefix_item_list_clear(files); setup_standard_excludes(&dir); add_pattern_list(&dir, EXC_CMDL, "--exclude option"); fill_directory(&dir, r->index, ps); for (i = 0; i < dir.nr; i++) { struct dir_entry *ent = dir.entries[i]; if (index_name_is_other(r->index, ent->name, ent->len)) { strbuf_reset(&buf); strbuf_add(&buf, ent->name, ent->len); add_file_item(&files->items, buf.buf); } } strbuf_release(&buf); dir_clear(&dir); return 0; } static int run_add_untracked(struct add_i_state *s, const struct pathspec *ps, struct prefix_item_list *files, struct list_and_choose_options *opts) { struct print_file_item_data *d = opts->list_opts.print_item_data; int res = 0, fd; size_t count, i; struct lock_file index_lock; if (get_untracked_files(s->r, files, ps) < 0) return -1; if (!files->items.nr) { printf(_("No untracked files.\n")); goto finish_add_untracked; } opts->prompt = N_("Add untracked"); d->only_names = 1; count = list_and_choose(s, files, opts); d->only_names = 0; if (count <= 0) goto finish_add_untracked; fd = repo_hold_locked_index(s->r, &index_lock, LOCK_REPORT_ON_ERROR); if (fd < 0) { res = -1; goto finish_add_untracked; } for (i = 0; i < files->items.nr; i++) { const char *name = files->items.items[i].string; if (files->selected[i] && add_file_to_index(s->r->index, name, 0) < 0) { res = error(_("could not stage '%s'"), name); break; } } if (!res && write_locked_index(s->r->index, &index_lock, COMMIT_LOCK) < 0) res = error(_("could not write index")); if (!res) printf(Q_("added %d path\n", "added %d paths\n", count), (int)count); finish_add_untracked: putchar('\n'); return res; } static int run_patch(struct add_i_state *s, const struct pathspec *ps, struct 
prefix_item_list *files, struct list_and_choose_options *opts) { int res = 0; ssize_t count, i, j; size_t unmerged_count = 0, binary_count = 0; if (get_modified_files(s->r, WORKTREE_ONLY, files, ps, &unmerged_count, &binary_count) < 0) return -1; if (unmerged_count || binary_count) { for (i = j = 0; i < files->items.nr; i++) { struct file_item *item = files->items.items[i].util; if (item->index.binary || item->worktree.binary) { free(item); free(files->items.items[i].string); } else if (item->index.unmerged || item->worktree.unmerged) { color_fprintf_ln(stderr, s->error_color, _("ignoring unmerged: %s"), files->items.items[i].string); free(item); free(files->items.items[i].string); } else files->items.items[j++] = files->items.items[i]; } files->items.nr = j; } if (!files->items.nr) { if (binary_count) fprintf(stderr, _("Only binary files changed.\n")); else fprintf(stderr, _("No changes.\n")); return 0; } opts->prompt = N_("Patch update"); count = list_and_choose(s, files, opts); if (count > 0) { struct strvec args = STRVEC_INIT; struct pathspec ps_selected = { 0 }; for (i = 0; i < files->items.nr; i++) if (files->selected[i]) strvec_push(&args, files->items.items[i].string); parse_pathspec(&ps_selected, PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL, PATHSPEC_LITERAL_PATH, "", args.v); res = run_add_p(s->r, ADD_P_ADD, NULL, &ps_selected); strvec_clear(&args); clear_pathspec(&ps_selected); } return res; } static int run_diff(struct add_i_state *s, const struct pathspec *ps, struct prefix_item_list *files, struct list_and_choose_options *opts) { int res = 0; ssize_t count, i; struct object_id oid; int is_initial = !refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", RESOLVE_REF_READING, &oid, NULL); if (get_modified_files(s->r, INDEX_ONLY, files, ps, NULL, NULL) < 0) return -1; if (!files->items.nr) { putchar('\n'); return 0; } opts->prompt = N_("Review diff"); opts->flags = IMMEDIATE; count = list_and_choose(s, files, opts); opts->flags = 0; if (count > 0) 
{ struct child_process cmd = CHILD_PROCESS_INIT; strvec_pushl(&cmd.args, "git", "diff", "-p", "--cached", oid_to_hex(!is_initial ? &oid : s->r->hash_algo->empty_tree), "--", NULL); for (i = 0; i < files->items.nr; i++) if (files->selected[i]) strvec_push(&cmd.args, files->items.items[i].string); res = run_command(&cmd); } putchar('\n'); return res; } static int run_help(struct add_i_state *s, const struct pathspec *ps UNUSED, struct prefix_item_list *files UNUSED, struct list_and_choose_options *opts UNUSED) { color_fprintf_ln(stdout, s->help_color, "status - %s", _("show paths with changes")); color_fprintf_ln(stdout, s->help_color, "update - %s", _("add working tree state to the staged set of changes")); color_fprintf_ln(stdout, s->help_color, "revert - %s", _("revert staged set of changes back to the HEAD version")); color_fprintf_ln(stdout, s->help_color, "patch - %s", _("pick hunks and update selectively")); color_fprintf_ln(stdout, s->help_color, "diff - %s", _("view diff between HEAD and index")); color_fprintf_ln(stdout, s->help_color, "add untracked - %s", _("add contents of untracked files to the staged set of changes")); return 0; } static void choose_prompt_help(struct add_i_state *s) { color_fprintf_ln(stdout, s->help_color, "%s", _("Prompt help:")); color_fprintf_ln(stdout, s->help_color, "1 - %s", _("select a single item")); color_fprintf_ln(stdout, s->help_color, "3-5 - %s", _("select a range of items")); color_fprintf_ln(stdout, s->help_color, "2-3,6-9 - %s", _("select multiple ranges")); color_fprintf_ln(stdout, s->help_color, "foo - %s", _("select item based on unique prefix")); color_fprintf_ln(stdout, s->help_color, "-... 
- %s", _("unselect specified items")); color_fprintf_ln(stdout, s->help_color, "* - %s", _("choose all items")); color_fprintf_ln(stdout, s->help_color, " - %s", _("(empty) finish selecting")); } typedef int (*command_t)(struct add_i_state *s, const struct pathspec *ps, struct prefix_item_list *files, struct list_and_choose_options *opts); struct command_item { size_t prefix_length; command_t command; }; struct print_command_item_data { const char *color, *reset; }; static void print_command_item(int i, int selected UNUSED, struct string_list_item *item, void *print_command_item_data) { struct print_command_item_data *d = print_command_item_data; struct command_item *util = item->util; if (!util->prefix_length || !is_valid_prefix(item->string, util->prefix_length)) printf(" %2d: %s", i + 1, item->string); else printf(" %2d: %s%.*s%s%s", i + 1, d->color, (int)util->prefix_length, item->string, d->reset, item->string + util->prefix_length); } static void command_prompt_help(struct add_i_state *s) { const char *help_color = s->help_color; color_fprintf_ln(stdout, help_color, "%s", _("Prompt help:")); color_fprintf_ln(stdout, help_color, "1 - %s", _("select a numbered item")); color_fprintf_ln(stdout, help_color, "foo - %s", _("select item based on unique prefix")); color_fprintf_ln(stdout, help_color, " - %s", _("(empty) select nothing")); } int run_add_i(struct repository *r, const struct pathspec *ps) { struct add_i_state s = { NULL }; struct print_command_item_data data = { "[", "]" }; struct list_and_choose_options main_loop_opts = { { 4, N_("*** Commands ***"), print_command_item, &data }, N_("What now"), SINGLETON | IMMEDIATE, command_prompt_help }; struct { const char *string; command_t command; } command_list[] = { { "status", run_status }, { "update", run_update }, { "revert", run_revert }, { "add untracked", run_add_untracked }, { "patch", run_patch }, { "diff", run_diff }, { "quit", NULL }, { "help", run_help }, }; struct prefix_item_list commands = 
PREFIX_ITEM_LIST_INIT; struct print_file_item_data print_file_item_data = { "%12s %12s %s", NULL, NULL, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT }; struct list_and_choose_options opts = { { 0, NULL, print_file_item, &print_file_item_data }, NULL, 0, choose_prompt_help }; struct strbuf header = STRBUF_INIT; struct prefix_item_list files = PREFIX_ITEM_LIST_INIT; ssize_t i; int res = 0; for (i = 0; i < ARRAY_SIZE(command_list); i++) { struct command_item *util = xcalloc(1, sizeof(*util)); util->command = command_list[i].command; string_list_append(&commands.items, command_list[i].string) ->util = util; } init_add_i_state(&s, r); /* * When color was asked for, use the prompt color for * highlighting, otherwise use square brackets. */ if (s.use_color) { data.color = s.prompt_color; data.reset = s.reset_color; } print_file_item_data.color = data.color; print_file_item_data.reset = data.reset; strbuf_addstr(&header, " "); strbuf_addf(&header, print_file_item_data.modified_fmt, _("staged"), _("unstaged"), _("path")); opts.list_opts.header = header.buf; discard_index(r->index); if (repo_read_index(r) < 0 || repo_refresh_and_write_index(r, REFRESH_QUIET, 0, 1, NULL, NULL, NULL) < 0) warning(_("could not refresh index")); res = run_status(&s, ps, &files, &opts); for (;;) { struct command_item *util; i = list_and_choose(&s, &commands, &main_loop_opts); if (i < 0 || i >= commands.items.nr) util = NULL; else util = commands.items.items[i].util; if (i == LIST_AND_CHOOSE_QUIT || (util && !util->command)) { printf(_("Bye.\n")); res = 0; break; } if (util) res = util->command(&s, ps, &files, &opts); } prefix_item_list_clear(&files); strbuf_release(&print_file_item_data.buf); strbuf_release(&print_file_item_data.name); strbuf_release(&print_file_item_data.index); strbuf_release(&print_file_item_data.worktree); strbuf_release(&header); prefix_item_list_clear(&commands); clear_add_i_state(&s); return res; } 
git-cinnabar-0.7.0/git-core/add-interactive.h000064400000000000000000000017051046102023000171160ustar 00000000000000#ifndef ADD_INTERACTIVE_H #define ADD_INTERACTIVE_H #include "color.h" struct add_i_state { struct repository *r; int use_color; char header_color[COLOR_MAXLEN]; char help_color[COLOR_MAXLEN]; char prompt_color[COLOR_MAXLEN]; char error_color[COLOR_MAXLEN]; char reset_color[COLOR_MAXLEN]; char fraginfo_color[COLOR_MAXLEN]; char context_color[COLOR_MAXLEN]; char file_old_color[COLOR_MAXLEN]; char file_new_color[COLOR_MAXLEN]; int use_single_key; char *interactive_diff_filter, *interactive_diff_algorithm; }; void init_add_i_state(struct add_i_state *s, struct repository *r); void clear_add_i_state(struct add_i_state *s); struct repository; struct pathspec; int run_add_i(struct repository *r, const struct pathspec *ps); enum add_p_mode { ADD_P_ADD, ADD_P_STASH, ADD_P_RESET, ADD_P_CHECKOUT, ADD_P_WORKTREE, }; int run_add_p(struct repository *r, enum add_p_mode mode, const char *revision, const struct pathspec *ps); #endif git-cinnabar-0.7.0/git-core/add-patch.c000064400000000000000000001477001046102023000157010ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "add-interactive.h" #include "advice.h" #include "editor.h" #include "environment.h" #include "gettext.h" #include "object-name.h" #include "pager.h" #include "read-cache-ll.h" #include "repository.h" #include "strbuf.h" #include "sigchain.h" #include "run-command.h" #include "strvec.h" #include "pathspec.h" #include "color.h" #include "compat/terminal.h" #include "prompt.h" enum prompt_mode_type { PROMPT_MODE_CHANGE = 0, PROMPT_DELETION, PROMPT_ADDITION, PROMPT_HUNK, PROMPT_MODE_MAX, /* must be last */ }; struct patch_mode { /* * The magic constant 4 is chosen such that all patch modes * provide enough space for three command-line arguments followed by a * trailing `NULL`. 
*/ const char *diff_cmd[4], *apply_args[4], *apply_check_args[4]; unsigned is_reverse:1, index_only:1, apply_for_checkout:1; const char *prompt_mode[PROMPT_MODE_MAX]; const char *edit_hunk_hint, *help_patch_text; }; static struct patch_mode patch_mode_add = { .diff_cmd = { "diff-files", NULL }, .apply_args = { "--cached", NULL }, .apply_check_args = { "--cached", NULL }, .prompt_mode = { N_("Stage mode change [y,n,q,a,d%s,?]? "), N_("Stage deletion [y,n,q,a,d%s,?]? "), N_("Stage addition [y,n,q,a,d%s,?]? "), N_("Stage this hunk [y,n,q,a,d%s,?]? ") }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for staging."), .help_patch_text = N_("y - stage this hunk\n" "n - do not stage this hunk\n" "q - quit; do not stage this hunk or any of the remaining " "ones\n" "a - stage this hunk and all later hunks in the file\n" "d - do not stage this hunk or any of the later hunks in " "the file\n") }; static struct patch_mode patch_mode_stash = { .diff_cmd = { "diff-index", "HEAD", NULL }, .apply_args = { "--cached", NULL }, .apply_check_args = { "--cached", NULL }, .prompt_mode = { N_("Stash mode change [y,n,q,a,d%s,?]? "), N_("Stash deletion [y,n,q,a,d%s,?]? "), N_("Stash addition [y,n,q,a,d%s,?]? "), N_("Stash this hunk [y,n,q,a,d%s,?]? "), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for stashing."), .help_patch_text = N_("y - stash this hunk\n" "n - do not stash this hunk\n" "q - quit; do not stash this hunk or any of the remaining " "ones\n" "a - stash this hunk and all later hunks in the file\n" "d - do not stash this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_reset_head = { .diff_cmd = { "diff-index", "--cached", NULL }, .apply_args = { "-R", "--cached", NULL }, .apply_check_args = { "-R", "--cached", NULL }, .is_reverse = 1, .index_only = 1, .prompt_mode = { N_("Unstage mode change [y,n,q,a,d%s,?]? 
"), N_("Unstage deletion [y,n,q,a,d%s,?]? "), N_("Unstage addition [y,n,q,a,d%s,?]? "), N_("Unstage this hunk [y,n,q,a,d%s,?]? "), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for unstaging."), .help_patch_text = N_("y - unstage this hunk\n" "n - do not unstage this hunk\n" "q - quit; do not unstage this hunk or any of the remaining " "ones\n" "a - unstage this hunk and all later hunks in the file\n" "d - do not unstage this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_reset_nothead = { .diff_cmd = { "diff-index", "-R", "--cached", NULL }, .apply_args = { "--cached", NULL }, .apply_check_args = { "--cached", NULL }, .index_only = 1, .prompt_mode = { N_("Apply mode change to index [y,n,q,a,d%s,?]? "), N_("Apply deletion to index [y,n,q,a,d%s,?]? "), N_("Apply addition to index [y,n,q,a,d%s,?]? "), N_("Apply this hunk to index [y,n,q,a,d%s,?]? "), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for applying."), .help_patch_text = N_("y - apply this hunk to index\n" "n - do not apply this hunk to index\n" "q - quit; do not apply this hunk or any of the remaining " "ones\n" "a - apply this hunk and all later hunks in the file\n" "d - do not apply this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_checkout_index = { .diff_cmd = { "diff-files", NULL }, .apply_args = { "-R", NULL }, .apply_check_args = { "-R", NULL }, .is_reverse = 1, .prompt_mode = { N_("Discard mode change from worktree [y,n,q,a,d%s,?]? "), N_("Discard deletion from worktree [y,n,q,a,d%s,?]? "), N_("Discard addition from worktree [y,n,q,a,d%s,?]? "), N_("Discard this hunk from worktree [y,n,q,a,d%s,?]? 
"), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for discarding."), .help_patch_text = N_("y - discard this hunk from worktree\n" "n - do not discard this hunk from worktree\n" "q - quit; do not discard this hunk or any of the remaining " "ones\n" "a - discard this hunk and all later hunks in the file\n" "d - do not discard this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_checkout_head = { .diff_cmd = { "diff-index", NULL }, .apply_for_checkout = 1, .apply_check_args = { "-R", NULL }, .is_reverse = 1, .prompt_mode = { N_("Discard mode change from index and worktree [y,n,q,a,d%s,?]? "), N_("Discard deletion from index and worktree [y,n,q,a,d%s,?]? "), N_("Discard addition from index and worktree [y,n,q,a,d%s,?]? "), N_("Discard this hunk from index and worktree [y,n,q,a,d%s,?]? "), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for discarding."), .help_patch_text = N_("y - discard this hunk from index and worktree\n" "n - do not discard this hunk from index and worktree\n" "q - quit; do not discard this hunk or any of the remaining " "ones\n" "a - discard this hunk and all later hunks in the file\n" "d - do not discard this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_checkout_nothead = { .diff_cmd = { "diff-index", "-R", NULL }, .apply_for_checkout = 1, .apply_check_args = { NULL }, .prompt_mode = { N_("Apply mode change to index and worktree [y,n,q,a,d%s,?]? "), N_("Apply deletion to index and worktree [y,n,q,a,d%s,?]? "), N_("Apply addition to index and worktree [y,n,q,a,d%s,?]? "), N_("Apply this hunk to index and worktree [y,n,q,a,d%s,?]? 
"), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for applying."), .help_patch_text = N_("y - apply this hunk to index and worktree\n" "n - do not apply this hunk to index and worktree\n" "q - quit; do not apply this hunk or any of the remaining " "ones\n" "a - apply this hunk and all later hunks in the file\n" "d - do not apply this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_worktree_head = { .diff_cmd = { "diff-index", NULL }, .apply_args = { "-R", NULL }, .apply_check_args = { "-R", NULL }, .is_reverse = 1, .prompt_mode = { N_("Discard mode change from worktree [y,n,q,a,d%s,?]? "), N_("Discard deletion from worktree [y,n,q,a,d%s,?]? "), N_("Discard addition from worktree [y,n,q,a,d%s,?]? "), N_("Discard this hunk from worktree [y,n,q,a,d%s,?]? "), }, .edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk " "will immediately be marked for discarding."), .help_patch_text = N_("y - discard this hunk from worktree\n" "n - do not discard this hunk from worktree\n" "q - quit; do not discard this hunk or any of the remaining " "ones\n" "a - discard this hunk and all later hunks in the file\n" "d - do not discard this hunk or any of the later hunks in " "the file\n"), }; static struct patch_mode patch_mode_worktree_nothead = { .diff_cmd = { "diff-index", "-R", NULL }, .apply_args = { NULL }, .apply_check_args = { NULL }, .prompt_mode = { N_("Apply mode change to worktree [y,n,q,a,d%s,?]? "), N_("Apply deletion to worktree [y,n,q,a,d%s,?]? "), N_("Apply addition to worktree [y,n,q,a,d%s,?]? "), N_("Apply this hunk to worktree [y,n,q,a,d%s,?]? 
"), },
	.edit_hunk_hint = N_("If the patch applies cleanly, the edited hunk "
			     "will immediately be marked for applying."),
	.help_patch_text =
		N_("y - apply this hunk to worktree\n"
		   "n - do not apply this hunk to worktree\n"
		   "q - quit; do not apply this hunk or any of the remaining "
			"ones\n"
		   "a - apply this hunk and all later hunks in the file\n"
		   "d - do not apply this hunk or any of the later hunks in "
			"the file\n"),
};

/*
 * Parsed form of one "@@ -<old>[,<count>] +<new>[,<count>] @@ <extra>"
 * hunk header (see parse_hunk_header() below).
 */
struct hunk_header {
	unsigned long old_offset, old_count, new_offset, new_count;
	/*
	 * Start/end offsets to the extra text after the second `@@` in the
	 * hunk header, e.g. the function signature. This is expected to
	 * include the newline.
	 */
	size_t extra_start, extra_end, colored_extra_start, colored_extra_end;
	unsigned suppress_colored_line_range:1;
};

/*
 * One hunk of the parsed diff. The start/end offsets index into the
 * session's `plain` strbuf (and `colored`, when a colored diff exists).
 */
struct hunk {
	size_t start, end, colored_start, colored_end, splittable_into;
	ssize_t delta;
	enum { UNDECIDED_HUNK = 0, SKIP_HUNK, USE_HUNK } use;
	struct hunk_header header;
};

/* All state of one interactive "patch" session. */
struct add_p_state {
	struct add_i_state s;
	struct strbuf answer, buf;

	/* parsed diff */
	struct strbuf plain, colored;
	struct file_diff {
		struct hunk head;
		struct hunk *hunk;
		size_t hunk_nr, hunk_alloc;
		unsigned deleted:1, added:1, mode_change:1, binary:1;
	} *file_diff;
	size_t file_diff_nr;

	/* patch mode */
	struct patch_mode *mode;
	const char *revision;
};

/* Release all heap memory held by `s` (the struct itself is not freed). */
static void add_p_state_clear(struct add_p_state *s)
{
	size_t i;

	strbuf_release(&s->answer);
	strbuf_release(&s->buf);
	strbuf_release(&s->plain);
	strbuf_release(&s->colored);
	for (i = 0; i < s->file_diff_nr; i++)
		free(s->file_diff[i].hunk);
	free(s->file_diff);
	clear_add_i_state(&s->s);
}

/*
 * Print a printf-style error message to stdout, wrapped in the session's
 * configured error/reset colors (puts() appends the trailing newline).
 */
__attribute__((format (printf, 2, 3)))
static void err(struct add_p_state *s, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fputs(s->s.error_color, stdout);
	vprintf(fmt, args);
	puts(s->s.reset_color);
	va_end(args);
}

/*
 * Initialize `cp` to run the given git command (NULL-terminated variadic
 * argument list) against this session's index file.
 */
LAST_ARG_MUST_BE_NULL
static void setup_child_process(struct add_p_state *s,
				struct child_process *cp, ...)
{
	va_list ap;
	const char *arg;

	/* collect the NULL-terminated varargs into the child's argv */
	va_start(ap, cp);
	while ((arg = va_arg(ap, const char *)))
		strvec_push(&cp->args, arg);
	va_end(ap);

	cp->git_cmd = 1;
	strvec_pushf(&cp->env, INDEX_ENVIRONMENT "=%s", s->s.r->index_file);
}

/*
 * Parse a "<offset>[,<count>]" range at `*p`; when the ",<count>" part is
 * omitted, the count defaults to 1. On success, advance `*p` past the
 * parsed range and return 0; return -1 on parse failure.
 */
static int parse_range(const char **p,
		       unsigned long *offset, unsigned long *count)
{
	char *pend;

	*offset = strtoul(*p, &pend, 10);
	if (pend == *p)
		return -1;
	if (*pend != ',') {
		*count = 1;
		*p = pend;
		return 0;
	}
	*count = strtoul(pend + 1, (char **)p, 10);
	return *p == pend + 1 ? -1 : 0;
}

/*
 * Parse the "@@ -<old> +<new> @@ <extra>" header at `hunk->start` into
 * `hunk->header`, advancing `hunk->start` (and `hunk->colored_start`)
 * past the header line(s). Returns 0 on success, a negative value if the
 * plain header cannot be parsed; an unparseable *colored* header is
 * tolerated (the line range is suppressed instead).
 */
static int parse_hunk_header(struct add_p_state *s, struct hunk *hunk)
{
	struct hunk_header *header = &hunk->header;
	const char *line = s->plain.buf + hunk->start, *p = line;
	char *eol = memchr(p, '\n', s->plain.len - hunk->start);

	if (!eol)
		eol = s->plain.buf + s->plain.len;

	if (!skip_prefix(p, "@@ -", &p) ||
	    parse_range(&p, &header->old_offset, &header->old_count) < 0 ||
	    !skip_prefix(p, " +", &p) ||
	    parse_range(&p, &header->new_offset, &header->new_count) < 0 ||
	    !skip_prefix(p, " @@", &p))
		return error(_("could not parse hunk header '%.*s'"),
			     (int)(eol - line), line);

	hunk->start = eol - s->plain.buf + (*eol == '\n');
	header->extra_start = p - s->plain.buf;
	header->extra_end = hunk->start;

	if (!s->colored.len) {
		header->colored_extra_start = header->colored_extra_end = 0;
		return 0;
	}

	/* Now find the extra text in the colored diff */
	line = s->colored.buf + hunk->colored_start;
	eol = memchr(line, '\n', s->colored.len - hunk->colored_start);
	if (!eol)
		eol = s->colored.buf + s->colored.len;
	p = memmem(line, eol - line, "@@ -", 4);
	if (p && (p = memmem(p + 4, eol - p - 4, " @@", 3))) {
		header->colored_extra_start = p + 3 - s->colored.buf;
	} else {
		/* could not parse colored hunk header, leave as-is */
		header->colored_extra_start = hunk->colored_start;
		header->suppress_colored_line_range = 1;
	}
	hunk->colored_start = eol - s->colored.buf + (*eol == '\n');
	header->colored_extra_end = hunk->colored_start;

	return 0;
}

/* Return 1 if the `len` bytes at `p` are all octal digits, else 0. */
static int is_octal(const char *p, size_t len)
{
	if
(!len)
		return 0;
	while (len--)
		if (*p < '0' || *(p++) > '7')
			return 0;
	return 1;
}

/*
 * Called when a file's diff has been fully scanned; `marker` is the
 * +/-/space marker of the last line seen.
 */
static void complete_file(char marker, struct hunk *hunk)
{
	if (marker == '-' || marker == '+')
		/*
		 * Last hunk ended in non-context line (i.e. it
		 * appended lines to the file, so there are no
		 * trailing context lines).
		 */
		hunk->splittable_into++;
}

/* Empty context lines may omit the leading ' ' */
static int normalize_marker(const char *p)
{
	return p[0] == '\n' || (p[0] == '\r' && p[1] == '\n') ? ' ' : p[0];
}

/*
 * Run the mode's diff command for the given pathspec and parse its output
 * into `s->plain`, `s->file_diff` and friends; when color is enabled, also
 * capture (and optionally filter) a colored diff kept line-aligned with
 * the plain one. Returns 0 on success, a negative value on error.
 */
static int parse_diff(struct add_p_state *s, const struct pathspec *ps)
{
	struct strvec args = STRVEC_INIT;
	const char *diff_algorithm = s->s.interactive_diff_algorithm;
	struct strbuf *plain = &s->plain, *colored = NULL;
	struct child_process cp = CHILD_PROCESS_INIT;
	char *p, *pend, *colored_p = NULL, *colored_pend = NULL, marker = '\0';
	size_t file_diff_alloc = 0, i, color_arg_index;
	struct file_diff *file_diff = NULL;
	struct hunk *hunk = NULL;
	int res;

	strvec_pushv(&args, s->mode->diff_cmd);
	if (diff_algorithm)
		strvec_pushf(&args, "--diff-algorithm=%s", diff_algorithm);
	if (s->revision) {
		struct object_id oid;
		strvec_push(&args,
			    /* could be on an unborn branch */
			    !strcmp("HEAD", s->revision) &&
			    repo_get_oid(the_repository, "HEAD", &oid) ?
			    empty_tree_oid_hex(the_repository->hash_algo) :
			    s->revision);
	}
	/* remember where "--no-color" goes so it can be patched later */
	color_arg_index = args.nr;
	/* Use `--no-color` explicitly, just in case `diff.color = always`.
*/ strvec_pushl(&args, "--no-color", "--ignore-submodules=dirty", "-p", "--", NULL); for (i = 0; i < ps->nr; i++) strvec_push(&args, ps->items[i].original); setup_child_process(s, &cp, NULL); strvec_pushv(&cp.args, args.v); res = capture_command(&cp, plain, 0); if (res) { strvec_clear(&args); return error(_("could not parse diff")); } if (!plain->len) { strvec_clear(&args); return 0; } strbuf_complete_line(plain); if (want_color_fd(1, -1)) { struct child_process colored_cp = CHILD_PROCESS_INIT; const char *diff_filter = s->s.interactive_diff_filter; setup_child_process(s, &colored_cp, NULL); xsnprintf((char *)args.v[color_arg_index], 8, "--color"); strvec_pushv(&colored_cp.args, args.v); colored = &s->colored; res = capture_command(&colored_cp, colored, 0); strvec_clear(&args); if (res) return error(_("could not parse colored diff")); if (diff_filter) { struct child_process filter_cp = CHILD_PROCESS_INIT; setup_child_process(s, &filter_cp, diff_filter, NULL); filter_cp.git_cmd = 0; filter_cp.use_shell = 1; strbuf_reset(&s->buf); if (pipe_command(&filter_cp, colored->buf, colored->len, &s->buf, colored->len, NULL, 0) < 0) return error(_("failed to run '%s'"), diff_filter); strbuf_swap(colored, &s->buf); } strbuf_complete_line(colored); colored_p = colored->buf; colored_pend = colored_p + colored->len; } strvec_clear(&args); /* parse files and hunks */ p = plain->buf; pend = p + plain->len; while (p != pend) { char *eol = memchr(p, '\n', pend - p); const char *deleted = NULL, *mode_change = NULL; char ch = normalize_marker(p); if (!eol) eol = pend; if (starts_with(p, "diff ") || starts_with(p, "* Unmerged path ")) { complete_file(marker, hunk); ALLOC_GROW_BY(s->file_diff, s->file_diff_nr, 1, file_diff_alloc); file_diff = s->file_diff + s->file_diff_nr - 1; hunk = &file_diff->head; hunk->start = p - plain->buf; if (colored_p) hunk->colored_start = colored_p - colored->buf; marker = '\0'; } else if (p == plain->buf) BUG("diff starts with unexpected line:\n" "%.*s\n", 
(int)(eol - p), p); else if (file_diff->deleted) ; /* keep the rest of the file in a single "hunk" */ else if (starts_with(p, "@@ ") || (hunk == &file_diff->head && (skip_prefix(p, "deleted file", &deleted)))) { if (marker == '-' || marker == '+') /* * Should not happen; previous hunk did not end * in a context line? Handle it anyway. */ hunk->splittable_into++; ALLOC_GROW_BY(file_diff->hunk, file_diff->hunk_nr, 1, file_diff->hunk_alloc); hunk = file_diff->hunk + file_diff->hunk_nr - 1; hunk->start = p - plain->buf; if (colored) hunk->colored_start = colored_p - colored->buf; if (deleted) file_diff->deleted = 1; else if (parse_hunk_header(s, hunk) < 0) return -1; /* * Start counting into how many hunks this one can be * split */ marker = ch; } else if (hunk == &file_diff->head && starts_with(p, "new file")) { file_diff->added = 1; } else if (hunk == &file_diff->head && skip_prefix(p, "old mode ", &mode_change) && is_octal(mode_change, eol - mode_change)) { if (file_diff->mode_change) BUG("double mode change?\n\n%.*s", (int)(eol - plain->buf), plain->buf); if (file_diff->hunk_nr) BUG("mode change in the middle?\n\n%.*s", (int)(eol - plain->buf), plain->buf); /* * Do *not* change `hunk`: the mode change pseudo-hunk * is _part of_ the header "hunk". */ file_diff->mode_change = 1; ALLOC_GROW_BY(file_diff->hunk, file_diff->hunk_nr, 1, file_diff->hunk_alloc); file_diff->hunk->start = p - plain->buf; if (colored_p) file_diff->hunk->colored_start = colored_p - colored->buf; } else if (hunk == &file_diff->head && skip_prefix(p, "new mode ", &mode_change) && is_octal(mode_change, eol - mode_change)) { /* * Extend the "mode change" pseudo-hunk to include also * the "new mode" line. 
*/ if (!file_diff->mode_change) BUG("'new mode' without 'old mode'?\n\n%.*s", (int)(eol - plain->buf), plain->buf); if (file_diff->hunk_nr != 1) BUG("mode change in the middle?\n\n%.*s", (int)(eol - plain->buf), plain->buf); if (p - plain->buf != file_diff->hunk->end) BUG("'new mode' does not immediately follow " "'old mode'?\n\n%.*s", (int)(eol - plain->buf), plain->buf); } else if (hunk == &file_diff->head && starts_with(p, "Binary files ")) file_diff->binary = 1; if (!!file_diff->deleted + !!file_diff->added + !!file_diff->mode_change > 1) BUG("diff can only contain delete *or* add *or* a " "mode change?!?\n%.*s", (int)(eol - (plain->buf + file_diff->head.start)), plain->buf + file_diff->head.start); if ((marker == '-' || marker == '+') && ch == ' ') hunk->splittable_into++; if (marker && ch != '\\') marker = ch; p = eol == pend ? pend : eol + 1; hunk->end = p - plain->buf; if (colored) { char *colored_eol = memchr(colored_p, '\n', colored_pend - colored_p); if (colored_eol) colored_p = colored_eol + 1; else if (p != pend) /* non-colored has more lines? */ goto mismatched_output; else if (colored_p == colored_pend) /* last line has no matching colored one? */ goto mismatched_output; else colored_p = colored_pend; hunk->colored_end = colored_p - colored->buf; } if (mode_change) { if (file_diff->hunk_nr != 1) BUG("mode change in hunk #%d???", (int)file_diff->hunk_nr); /* Adjust the end of the "mode change" pseudo-hunk */ file_diff->hunk->end = hunk->end; if (colored) file_diff->hunk->colored_end = hunk->colored_end; } } complete_file(marker, hunk); /* non-colored shorter than colored? 
*/
	if (colored_p != colored_pend) {
mismatched_output:
		error(_("mismatched output from interactive.diffFilter"));
		advise(_("Your filter must maintain a one-to-one correspondence\n"
			 "between its input and output lines."));
		return -1;
	}

	return 0;
}

/*
 * Return the offset just past the newline that terminates the line
 * containing `offset`, or `sb->len` if there is no such newline.
 */
static size_t find_next_line(struct strbuf *sb, size_t offset)
{
	char *eol;

	if (offset >= sb->len)
		BUG("looking for next line beyond buffer (%d >= %d)\n%s",
		    (int)offset, (int)sb->len, sb->buf);
	eol = memchr(sb->buf + offset, '\n', sb->len - offset);
	if (!eol)
		return sb->len;
	return eol - sb->buf + 1;
}

/*
 * Append `hunk` (header plus body, plain or colored) to `out`, shifting
 * the rendered line offsets by `delta` to account for previously
 * skipped/selected hunks.
 */
static void render_hunk(struct add_p_state *s, struct hunk *hunk,
			ssize_t delta, int colored, struct strbuf *out)
{
	struct hunk_header *header = &hunk->header;

	if (hunk->header.old_offset != 0 || hunk->header.new_offset != 0) {
		/*
		 * Generate the hunk header dynamically, except for special
		 * hunks (such as the diff header).
		 */
		const char *p;
		size_t len;
		unsigned long old_offset = header->old_offset;
		unsigned long new_offset = header->new_offset;

		if (!colored) {
			p = s->plain.buf + header->extra_start;
			len = header->extra_end - header->extra_start;
		} else if (header->suppress_colored_line_range) {
			/* colored header was unparseable; emit it verbatim */
			strbuf_add(out,
				   s->colored.buf + header->colored_extra_start,
				   header->colored_extra_end -
				   header->colored_extra_start);
			strbuf_add(out, s->colored.buf + hunk->colored_start,
				   hunk->colored_end - hunk->colored_start);
			return;
		} else {
			strbuf_addstr(out, s->s.fraginfo_color);
			p = s->colored.buf + header->colored_extra_start;
			len = header->colored_extra_end -
				header->colored_extra_start;
		}

		if (s->mode->is_reverse)
			old_offset -= delta;
		else
			new_offset += delta;

		strbuf_addf(out, "@@ -%lu", old_offset);
		if (header->old_count != 1)
			strbuf_addf(out, ",%lu", header->old_count);
		strbuf_addf(out, " +%lu", new_offset);
		if (header->new_count != 1)
			strbuf_addf(out, ",%lu", header->new_count);
		strbuf_addstr(out, " @@");

		if (len)
			strbuf_add(out, p, len);
		else if (colored)
			strbuf_addf(out, "%s\n", s->s.reset_color);
		else
			strbuf_addch(out, '\n');
	}

	if
(colored) strbuf_add(out, s->colored.buf + hunk->colored_start, hunk->colored_end - hunk->colored_start); else strbuf_add(out, s->plain.buf + hunk->start, hunk->end - hunk->start); } static void render_diff_header(struct add_p_state *s, struct file_diff *file_diff, int colored, struct strbuf *out) { /* * If there was a mode change, the first hunk is a pseudo hunk that * corresponds to the mode line in the header. If the user did not want * to stage that "hunk", we actually have to cut it out from the header. */ int skip_mode_change = file_diff->mode_change && file_diff->hunk->use != USE_HUNK; struct hunk *head = &file_diff->head, *first = file_diff->hunk; if (!skip_mode_change) { render_hunk(s, head, 0, colored, out); return; } if (colored) { const char *p = s->colored.buf; strbuf_add(out, p + head->colored_start, first->colored_start - head->colored_start); strbuf_add(out, p + first->colored_end, head->colored_end - first->colored_end); } else { const char *p = s->plain.buf; strbuf_add(out, p + head->start, first->start - head->start); strbuf_add(out, p + first->end, head->end - first->end); } } /* Coalesce hunks again that were split */ static int merge_hunks(struct add_p_state *s, struct file_diff *file_diff, size_t *hunk_index, int use_all, struct hunk *merged) { size_t i = *hunk_index, delta; struct hunk *hunk = file_diff->hunk + i; /* `header` corresponds to the merged hunk */ struct hunk_header *header = &merged->header, *next; if (!use_all && hunk->use != USE_HUNK) return 0; *merged = *hunk; /* We simply skip the colored part (if any) when merging hunks */ merged->colored_start = merged->colored_end = 0; for (; i + 1 < file_diff->hunk_nr; i++) { hunk++; next = &hunk->header; /* * Stop merging hunks when: * * - the hunk is not selected for use, or * - the hunk does not overlap with the already-merged hunk(s) */ if ((!use_all && hunk->use != USE_HUNK) || header->new_offset >= next->new_offset + merged->delta || header->new_offset + header->new_count < 
next->new_offset + merged->delta) break; /* * If the hunks were not edited, and overlap, we can simply * extend the line range. */ if (merged->start < hunk->start && merged->end > hunk->start) { merged->end = hunk->end; merged->colored_end = hunk->colored_end; delta = 0; } else { const char *plain = s->plain.buf; size_t overlapping_line_count = header->new_offset + header->new_count - merged->delta - next->new_offset; size_t overlap_end = hunk->start; size_t overlap_start = overlap_end; size_t overlap_next, len, j; /* * One of the hunks was edited: the modified hunk was * appended to the strbuf `s->plain`. * * Let's ensure that at least the last context line of * the first hunk overlaps with the corresponding line * of the second hunk, and then merge. */ for (j = 0; j < overlapping_line_count; j++) { overlap_next = find_next_line(&s->plain, overlap_end); if (overlap_next > hunk->end) BUG("failed to find %d context lines " "in:\n%.*s", (int)overlapping_line_count, (int)(hunk->end - hunk->start), plain + hunk->start); if (normalize_marker(&plain[overlap_end]) != ' ') return error(_("expected context line " "#%d in\n%.*s"), (int)(j + 1), (int)(hunk->end - hunk->start), plain + hunk->start); overlap_start = overlap_end; overlap_end = overlap_next; } len = overlap_end - overlap_start; if (len > merged->end - merged->start || memcmp(plain + merged->end - len, plain + overlap_start, len)) return error(_("hunks do not overlap:\n%.*s\n" "\tdoes not end with:\n%.*s"), (int)(merged->end - merged->start), plain + merged->start, (int)len, plain + overlap_start); /* * Since the start-end ranges are not adjacent, we * cannot simply take the union of the ranges. To * address that, we temporarily append the union of the * lines to the `plain` strbuf. 
*/ if (merged->end != s->plain.len) { size_t start = s->plain.len; strbuf_add(&s->plain, plain + merged->start, merged->end - merged->start); plain = s->plain.buf; merged->start = start; merged->end = s->plain.len; } strbuf_add(&s->plain, plain + overlap_end, hunk->end - overlap_end); merged->end = s->plain.len; merged->splittable_into += hunk->splittable_into; delta = merged->delta; merged->delta += hunk->delta; } header->old_count = next->old_offset + next->old_count - header->old_offset; header->new_count = next->new_offset + delta + next->new_count - header->new_offset; } if (i == *hunk_index) return 0; *hunk_index = i; return 1; } static void reassemble_patch(struct add_p_state *s, struct file_diff *file_diff, int use_all, struct strbuf *out) { struct hunk *hunk; size_t save_len = s->plain.len, i; ssize_t delta = 0; render_diff_header(s, file_diff, 0, out); for (i = file_diff->mode_change; i < file_diff->hunk_nr; i++) { struct hunk merged = { 0 }; hunk = file_diff->hunk + i; if (!use_all && hunk->use != USE_HUNK) delta += hunk->header.old_count - hunk->header.new_count; else { /* merge overlapping hunks into a temporary hunk */ if (merge_hunks(s, file_diff, &i, use_all, &merged)) hunk = &merged; render_hunk(s, hunk, delta, 0, out); /* * In case `merge_hunks()` used `plain` as a scratch * pad (this happens when an edited hunk had to be * coalesced with another hunk). 
*/ strbuf_setlen(&s->plain, save_len); delta += hunk->delta; } } } static int split_hunk(struct add_p_state *s, struct file_diff *file_diff, size_t hunk_index) { int colored = !!s->colored.len, first = 1; struct hunk *hunk = file_diff->hunk + hunk_index; size_t splittable_into; size_t end, colored_end, current, colored_current = 0, context_line_count; struct hunk_header remaining, *header; char marker, ch; if (hunk_index >= file_diff->hunk_nr) BUG("invalid hunk index: %d (must be >= 0 and < %d)", (int)hunk_index, (int)file_diff->hunk_nr); if (hunk->splittable_into < 2) return 0; splittable_into = hunk->splittable_into; end = hunk->end; colored_end = hunk->colored_end; remaining = hunk->header; file_diff->hunk_nr += splittable_into - 1; ALLOC_GROW(file_diff->hunk, file_diff->hunk_nr, file_diff->hunk_alloc); if (hunk_index + splittable_into < file_diff->hunk_nr) memmove(file_diff->hunk + hunk_index + splittable_into, file_diff->hunk + hunk_index + 1, (file_diff->hunk_nr - hunk_index - splittable_into) * sizeof(*hunk)); hunk = file_diff->hunk + hunk_index; hunk->splittable_into = 1; memset(hunk + 1, 0, (splittable_into - 1) * sizeof(*hunk)); header = &hunk->header; header->old_count = header->new_count = 0; current = hunk->start; if (colored) colored_current = hunk->colored_start; marker = '\0'; context_line_count = 0; while (splittable_into > 1) { ch = normalize_marker(&s->plain.buf[current]); if (!ch) BUG("buffer overrun while splitting hunks"); /* * Is this the first context line after a chain of +/- lines? * Then record the start of the next split hunk. */ if ((marker == '-' || marker == '+') && ch == ' ') { first = 0; hunk[1].start = current; if (colored) hunk[1].colored_start = colored_current; context_line_count = 0; } /* * Was the previous line a +/- one? Alternatively, is this the * first line (and not a +/- one)? * * Then just increment the appropriate counter and continue * with the next line. 
*/ if (marker != ' ' || (ch != '-' && ch != '+')) { next_hunk_line: /* Comment lines are attached to the previous line */ if (ch == '\\') ch = marker ? marker : ' '; /* current hunk not done yet */ if (ch == ' ') context_line_count++; else if (ch == '-') header->old_count++; else if (ch == '+') header->new_count++; else BUG("unhandled diff marker: '%c'", ch); marker = ch; current = find_next_line(&s->plain, current); if (colored) colored_current = find_next_line(&s->colored, colored_current); continue; } /* * We got us the start of a new hunk! * * This is a context line, so it is shared with the previous * hunk, if any. */ if (first) { if (header->old_count || header->new_count) BUG("counts are off: %d/%d", (int)header->old_count, (int)header->new_count); header->old_count = context_line_count; header->new_count = context_line_count; context_line_count = 0; first = 0; goto next_hunk_line; } remaining.old_offset += header->old_count; remaining.old_count -= header->old_count; remaining.new_offset += header->new_count; remaining.new_count -= header->new_count; /* initialize next hunk header's offsets */ hunk[1].header.old_offset = header->old_offset + header->old_count; hunk[1].header.new_offset = header->new_offset + header->new_count; /* add one split hunk */ header->old_count += context_line_count; header->new_count += context_line_count; hunk->end = current; if (colored) hunk->colored_end = colored_current; hunk++; hunk->splittable_into = 1; hunk->use = hunk[-1].use; header = &hunk->header; header->old_count = header->new_count = context_line_count; context_line_count = 0; splittable_into--; marker = ch; } /* last hunk simply gets the rest */ if (header->old_offset != remaining.old_offset) BUG("miscounted old_offset: %lu != %lu", header->old_offset, remaining.old_offset); if (header->new_offset != remaining.new_offset) BUG("miscounted new_offset: %lu != %lu", header->new_offset, remaining.new_offset); header->old_count = remaining.old_count; header->new_count = 
remaining.new_count; hunk->end = end; if (colored) hunk->colored_end = colored_end; return 0; } static void recolor_hunk(struct add_p_state *s, struct hunk *hunk) { const char *plain = s->plain.buf; size_t current, eol, next; if (!s->colored.len) return; hunk->colored_start = s->colored.len; for (current = hunk->start; current < hunk->end; ) { for (eol = current; eol < hunk->end; eol++) if (plain[eol] == '\n') break; next = eol + (eol < hunk->end); if (eol > current && plain[eol - 1] == '\r') eol--; strbuf_addstr(&s->colored, plain[current] == '-' ? s->s.file_old_color : plain[current] == '+' ? s->s.file_new_color : s->s.context_color); strbuf_add(&s->colored, plain + current, eol - current); strbuf_addstr(&s->colored, s->s.reset_color); if (next > eol) strbuf_add(&s->colored, plain + eol, next - eol); current = next; } hunk->colored_end = s->colored.len; } static int edit_hunk_manually(struct add_p_state *s, struct hunk *hunk) { size_t i; strbuf_reset(&s->buf); strbuf_commented_addf(&s->buf, comment_line_str, _("Manual hunk edit mode -- see bottom for " "a quick guide.\n")); render_hunk(s, hunk, 0, 0, &s->buf); strbuf_commented_addf(&s->buf, comment_line_str, _("---\n" "To remove '%c' lines, make them ' ' lines " "(context).\n" "To remove '%c' lines, delete them.\n" "Lines starting with %s will be removed.\n"), s->mode->is_reverse ? '+' : '-', s->mode->is_reverse ? '-' : '+', comment_line_str); strbuf_commented_addf(&s->buf, comment_line_str, "%s", _(s->mode->edit_hunk_hint)); /* * TRANSLATORS: 'it' refers to the patch mentioned in the previous * messages. */ strbuf_commented_addf(&s->buf, comment_line_str, _("If it does not apply cleanly, you will be " "given an opportunity to\n" "edit again. 
If all lines of the hunk are " "removed, then the edit is\n" "aborted and the hunk is left unchanged.\n")); if (strbuf_edit_interactively(the_repository, &s->buf, "addp-hunk-edit.diff", NULL) < 0) return -1; /* strip out commented lines */ hunk->start = s->plain.len; for (i = 0; i < s->buf.len; ) { size_t next = find_next_line(&s->buf, i); if (!starts_with(s->buf.buf + i, comment_line_str)) strbuf_add(&s->plain, s->buf.buf + i, next - i); i = next; } hunk->end = s->plain.len; if (hunk->end == hunk->start) /* The user aborted editing by deleting everything */ return 0; recolor_hunk(s, hunk); /* * If the hunk header is intact, parse it, otherwise simply use the * hunk header prior to editing (which will adjust `hunk->start` to * skip the hunk header). */ if (s->plain.buf[hunk->start] == '@' && parse_hunk_header(s, hunk) < 0) return error(_("could not parse hunk header")); return 1; } static ssize_t recount_edited_hunk(struct add_p_state *s, struct hunk *hunk, size_t orig_old_count, size_t orig_new_count) { struct hunk_header *header = &hunk->header; size_t i; header->old_count = header->new_count = 0; for (i = hunk->start; i < hunk->end; ) { switch(normalize_marker(&s->plain.buf[i])) { case '-': header->old_count++; break; case '+': header->new_count++; break; case ' ': header->old_count++; header->new_count++; break; } i = find_next_line(&s->plain, i); } return orig_old_count - orig_new_count - header->old_count + header->new_count; } static int run_apply_check(struct add_p_state *s, struct file_diff *file_diff) { struct child_process cp = CHILD_PROCESS_INIT; strbuf_reset(&s->buf); reassemble_patch(s, file_diff, 1, &s->buf); setup_child_process(s, &cp, "apply", "--check", NULL); strvec_pushv(&cp.args, s->mode->apply_check_args); if (pipe_command(&cp, s->buf.buf, s->buf.len, NULL, 0, NULL, 0)) return error(_("'git apply --cached' failed")); return 0; } static int read_single_character(struct add_p_state *s) { if (s->s.use_single_key) { int res = 
read_key_without_echo(&s->answer);
		printf("%s\n", res == EOF ? "" : s->answer.buf);
		return res;
	}

	if (git_read_line_interactively(&s->answer) == EOF)
		return EOF;
	return 0;
}

/*
 * Prompt until the user's answer starts with 'y' or 'n' (case-insensitive);
 * return 1 for yes, 0 for no, -1 on EOF.
 */
static int prompt_yesno(struct add_p_state *s, const char *prompt)
{
	for (;;) {
		color_fprintf(stdout, s->s.prompt_color, "%s", _(prompt));
		fflush(stdout);
		if (read_single_character(s) == EOF)
			return -1;
		/* do not limit to 1-byte input to allow 'no' etc. */
		switch (tolower(s->answer.buf[0])) {
		case 'n': return 0;
		case 'y': return 1;
		}
	}
}

/*
 * Let the user edit `hunk` repeatedly until the edited result applies
 * cleanly (return 0) or the user gives up (return -1). Failed edits are
 * rolled back by truncating `s->plain`/`s->colored` and restoring the
 * saved copy of the hunk.
 */
static int edit_hunk_loop(struct add_p_state *s,
			  struct file_diff *file_diff, struct hunk *hunk)
{
	size_t plain_len = s->plain.len, colored_len = s->colored.len;
	struct hunk backup;

	backup = *hunk;

	for (;;) {
		int res = edit_hunk_manually(s, hunk);
		if (res == 0) {
			/* abandoned */
			*hunk = backup;
			return -1;
		}

		if (res > 0) {
			hunk->delta +=
				recount_edited_hunk(s, hunk,
						    backup.header.old_count,
						    backup.header.new_count);
			if (!run_apply_check(s, file_diff))
				return 0;
		}

		/* Drop edits (they were appended to s->plain) */
		strbuf_setlen(&s->plain, plain_len);
		strbuf_setlen(&s->colored, colored_len);
		*hunk = backup;

		/*
		 * TRANSLATORS: do not translate [y/n]
		 * The program will only accept that input at this point.
		 * Consider translating (saying "no" discards!) as
		 * (saying "n" for "no" discards!) if the translation
		 * of the word "no" does not start with n.
		 */
		res = prompt_yesno(s, _("Your edited hunk does not apply. "
					"Edit again (saying \"no\" discards!) "
					"[y/n]? "));
		if (res < 1)
			return -1;
	}
}

/*
 * For the checkout/worktree patch modes: check whether `diff` applies to
 * the index and to the worktree, then apply it where possible (prompting
 * when it only applies to the worktree).
 */
static int apply_for_checkout(struct add_p_state *s, struct strbuf *diff,
			      int is_reverse)
{
	const char *reverse = is_reverse ?
"-R" : NULL; struct child_process check_index = CHILD_PROCESS_INIT; struct child_process check_worktree = CHILD_PROCESS_INIT; struct child_process apply_index = CHILD_PROCESS_INIT; struct child_process apply_worktree = CHILD_PROCESS_INIT; int applies_index, applies_worktree; setup_child_process(s, &check_index, "apply", "--cached", "--check", reverse, NULL); applies_index = !pipe_command(&check_index, diff->buf, diff->len, NULL, 0, NULL, 0); setup_child_process(s, &check_worktree, "apply", "--check", reverse, NULL); applies_worktree = !pipe_command(&check_worktree, diff->buf, diff->len, NULL, 0, NULL, 0); if (applies_worktree && applies_index) { setup_child_process(s, &apply_index, "apply", "--cached", reverse, NULL); pipe_command(&apply_index, diff->buf, diff->len, NULL, 0, NULL, 0); setup_child_process(s, &apply_worktree, "apply", reverse, NULL); pipe_command(&apply_worktree, diff->buf, diff->len, NULL, 0, NULL, 0); return 1; } if (!applies_index) { err(s, _("The selected hunks do not apply to the index!")); if (prompt_yesno(s, _("Apply them to the worktree " "anyway? 
")) > 0) { setup_child_process(s, &apply_worktree, "apply", reverse, NULL); return pipe_command(&apply_worktree, diff->buf, diff->len, NULL, 0, NULL, 0); } err(s, _("Nothing was applied.\n")); } else /* As a last resort, show the diff to the user */ fwrite(diff->buf, diff->len, 1, stdout); return 0; } #define SUMMARY_HEADER_WIDTH 20 #define SUMMARY_LINE_WIDTH 80 static void summarize_hunk(struct add_p_state *s, struct hunk *hunk, struct strbuf *out) { struct hunk_header *header = &hunk->header; struct strbuf *plain = &s->plain; size_t len = out->len, i; strbuf_addf(out, " -%lu,%lu +%lu,%lu ", header->old_offset, header->old_count, header->new_offset, header->new_count); if (out->len - len < SUMMARY_HEADER_WIDTH) strbuf_addchars(out, ' ', SUMMARY_HEADER_WIDTH + len - out->len); for (i = hunk->start; i < hunk->end; i = find_next_line(plain, i)) if (plain->buf[i] != ' ') break; if (i < hunk->end) strbuf_add(out, plain->buf + i, find_next_line(plain, i) - i); if (out->len - len > SUMMARY_LINE_WIDTH) strbuf_setlen(out, len + SUMMARY_LINE_WIDTH); strbuf_complete_line(out); } #define DISPLAY_HUNKS_LINES 20 static size_t display_hunks(struct add_p_state *s, struct file_diff *file_diff, size_t start_index) { size_t end_index = start_index + DISPLAY_HUNKS_LINES; if (end_index > file_diff->hunk_nr) end_index = file_diff->hunk_nr; while (start_index < end_index) { struct hunk *hunk = file_diff->hunk + start_index++; strbuf_reset(&s->buf); strbuf_addf(&s->buf, "%c%2d: ", hunk->use == USE_HUNK ? '+' : hunk->use == SKIP_HUNK ? 
'-' : ' ', (int)start_index); summarize_hunk(s, hunk, &s->buf); fputs(s->buf.buf, stdout); } return end_index; } static const char help_patch_remainder[] = N_("j - leave this hunk undecided, see next undecided hunk\n" "J - leave this hunk undecided, see next hunk\n" "k - leave this hunk undecided, see previous undecided hunk\n" "K - leave this hunk undecided, see previous hunk\n" "g - select a hunk to go to\n" "/ - search for a hunk matching the given regex\n" "s - split the current hunk into smaller hunks\n" "e - manually edit the current hunk\n" "p - print the current hunk, 'P' to use the pager\n" "? - print help\n"); static int patch_update_file(struct add_p_state *s, struct file_diff *file_diff) { size_t hunk_index = 0; ssize_t i, undecided_previous, undecided_next, rendered_hunk_index = -1; struct hunk *hunk; char ch; struct child_process cp = CHILD_PROCESS_INIT; int colored = !!s->colored.len, quit = 0, use_pager = 0; enum prompt_mode_type prompt_mode_type; enum { ALLOW_GOTO_PREVIOUS_HUNK = 1 << 0, ALLOW_GOTO_PREVIOUS_UNDECIDED_HUNK = 1 << 1, ALLOW_GOTO_NEXT_HUNK = 1 << 2, ALLOW_GOTO_NEXT_UNDECIDED_HUNK = 1 << 3, ALLOW_SEARCH_AND_GOTO = 1 << 4, ALLOW_SPLIT = 1 << 5, ALLOW_EDIT = 1 << 6 } permitted = 0; /* Empty added files have no hunks */ if (!file_diff->hunk_nr && !file_diff->added) return 0; strbuf_reset(&s->buf); render_diff_header(s, file_diff, colored, &s->buf); fputs(s->buf.buf, stdout); for (;;) { if (hunk_index >= file_diff->hunk_nr) hunk_index = 0; hunk = file_diff->hunk_nr ? file_diff->hunk + hunk_index : &file_diff->head; undecided_previous = -1; undecided_next = -1; if (file_diff->hunk_nr) { for (i = hunk_index - 1; i >= 0; i--) if (file_diff->hunk[i].use == UNDECIDED_HUNK) { undecided_previous = i; break; } for (i = hunk_index + 1; i < file_diff->hunk_nr; i++) if (file_diff->hunk[i].use == UNDECIDED_HUNK) { undecided_next = i; break; } } /* Everything decided? 
*/ if (undecided_previous < 0 && undecided_next < 0 && hunk->use != UNDECIDED_HUNK) break; strbuf_reset(&s->buf); if (file_diff->hunk_nr) { if (rendered_hunk_index != hunk_index) { if (use_pager) { setup_pager(); sigchain_push(SIGPIPE, SIG_IGN); } render_hunk(s, hunk, 0, colored, &s->buf); fputs(s->buf.buf, stdout); rendered_hunk_index = hunk_index; if (use_pager) { sigchain_pop(SIGPIPE); wait_for_pager(); use_pager = 0; } } strbuf_reset(&s->buf); if (undecided_previous >= 0) { permitted |= ALLOW_GOTO_PREVIOUS_UNDECIDED_HUNK; strbuf_addstr(&s->buf, ",k"); } if (hunk_index) { permitted |= ALLOW_GOTO_PREVIOUS_HUNK; strbuf_addstr(&s->buf, ",K"); } if (undecided_next >= 0) { permitted |= ALLOW_GOTO_NEXT_UNDECIDED_HUNK; strbuf_addstr(&s->buf, ",j"); } if (hunk_index + 1 < file_diff->hunk_nr) { permitted |= ALLOW_GOTO_NEXT_HUNK; strbuf_addstr(&s->buf, ",J"); } if (file_diff->hunk_nr > 1) { permitted |= ALLOW_SEARCH_AND_GOTO; strbuf_addstr(&s->buf, ",g,/"); } if (hunk->splittable_into > 1) { permitted |= ALLOW_SPLIT; strbuf_addstr(&s->buf, ",s"); } if (hunk_index + 1 > file_diff->mode_change && !file_diff->deleted) { permitted |= ALLOW_EDIT; strbuf_addstr(&s->buf, ",e"); } strbuf_addstr(&s->buf, ",p"); } if (file_diff->deleted) prompt_mode_type = PROMPT_DELETION; else if (file_diff->added) prompt_mode_type = PROMPT_ADDITION; else if (file_diff->mode_change && !hunk_index) prompt_mode_type = PROMPT_MODE_CHANGE; else prompt_mode_type = PROMPT_HUNK; printf("%s(%"PRIuMAX"/%"PRIuMAX") ", s->s.prompt_color, (uintmax_t)hunk_index + 1, (uintmax_t)(file_diff->hunk_nr ? 
file_diff->hunk_nr : 1)); printf(_(s->mode->prompt_mode[prompt_mode_type]), s->buf.buf); if (*s->s.reset_color) fputs(s->s.reset_color, stdout); fflush(stdout); if (read_single_character(s) == EOF) break; if (!s->answer.len) continue; ch = tolower(s->answer.buf[0]); /* 'g' takes a hunk number and '/' takes a regexp */ if (s->answer.len != 1 && (ch != 'g' && ch != '/')) { err(s, _("Only one letter is expected, got '%s'"), s->answer.buf); continue; } if (ch == 'y') { hunk->use = USE_HUNK; soft_increment: hunk_index = undecided_next < 0 ? file_diff->hunk_nr : undecided_next; } else if (ch == 'n') { hunk->use = SKIP_HUNK; goto soft_increment; } else if (ch == 'a') { if (file_diff->hunk_nr) { for (; hunk_index < file_diff->hunk_nr; hunk_index++) { hunk = file_diff->hunk + hunk_index; if (hunk->use == UNDECIDED_HUNK) hunk->use = USE_HUNK; } } else if (hunk->use == UNDECIDED_HUNK) { hunk->use = USE_HUNK; } } else if (ch == 'd' || ch == 'q') { if (file_diff->hunk_nr) { for (; hunk_index < file_diff->hunk_nr; hunk_index++) { hunk = file_diff->hunk + hunk_index; if (hunk->use == UNDECIDED_HUNK) hunk->use = SKIP_HUNK; } } else if (hunk->use == UNDECIDED_HUNK) { hunk->use = SKIP_HUNK; } if (ch == 'q') { quit = 1; break; } } else if (s->answer.buf[0] == 'K') { if (permitted & ALLOW_GOTO_PREVIOUS_HUNK) hunk_index--; else err(s, _("No previous hunk")); } else if (s->answer.buf[0] == 'J') { if (permitted & ALLOW_GOTO_NEXT_HUNK) hunk_index++; else err(s, _("No next hunk")); } else if (s->answer.buf[0] == 'k') { if (permitted & ALLOW_GOTO_PREVIOUS_UNDECIDED_HUNK) hunk_index = undecided_previous; else err(s, _("No previous hunk")); } else if (s->answer.buf[0] == 'j') { if (permitted & ALLOW_GOTO_NEXT_UNDECIDED_HUNK) hunk_index = undecided_next; else err(s, _("No next hunk")); } else if (s->answer.buf[0] == 'g') { char *pend; unsigned long response; if (!(permitted & ALLOW_SEARCH_AND_GOTO)) { err(s, _("No other hunks to goto")); continue; } strbuf_remove(&s->answer, 0, 1); 
strbuf_trim(&s->answer); i = hunk_index - DISPLAY_HUNKS_LINES / 2; if (i < (int)file_diff->mode_change) i = file_diff->mode_change; while (s->answer.len == 0) { i = display_hunks(s, file_diff, i); printf("%s", i < file_diff->hunk_nr ? _("go to which hunk ( to see " "more)? ") : _("go to which hunk? ")); fflush(stdout); if (strbuf_getline(&s->answer, stdin) == EOF) break; strbuf_trim_trailing_newline(&s->answer); } strbuf_trim(&s->answer); response = strtoul(s->answer.buf, &pend, 10); if (*pend || pend == s->answer.buf) err(s, _("Invalid number: '%s'"), s->answer.buf); else if (0 < response && response <= file_diff->hunk_nr) hunk_index = response - 1; else err(s, Q_("Sorry, only %d hunk available.", "Sorry, only %d hunks available.", file_diff->hunk_nr), (int)file_diff->hunk_nr); } else if (s->answer.buf[0] == '/') { regex_t regex; int ret; if (!(permitted & ALLOW_SEARCH_AND_GOTO)) { err(s, _("No other hunks to search")); continue; } strbuf_remove(&s->answer, 0, 1); strbuf_trim_trailing_newline(&s->answer); if (s->answer.len == 0) { printf("%s", _("search for regex? 
")); fflush(stdout); if (strbuf_getline(&s->answer, stdin) == EOF) break; strbuf_trim_trailing_newline(&s->answer); if (s->answer.len == 0) continue; } ret = regcomp(®ex, s->answer.buf, REG_EXTENDED | REG_NOSUB | REG_NEWLINE); if (ret) { char errbuf[1024]; regerror(ret, ®ex, errbuf, sizeof(errbuf)); err(s, _("Malformed search regexp %s: %s"), s->answer.buf, errbuf); continue; } i = hunk_index; for (;;) { /* render the hunk into a scratch buffer */ render_hunk(s, file_diff->hunk + i, 0, 0, &s->buf); if (regexec(®ex, s->buf.buf, 0, NULL, 0) != REG_NOMATCH) break; i++; if (i == file_diff->hunk_nr) i = 0; if (i != hunk_index) continue; err(s, _("No hunk matches the given pattern")); break; } regfree(®ex); hunk_index = i; } else if (s->answer.buf[0] == 's') { size_t splittable_into = hunk->splittable_into; if (!(permitted & ALLOW_SPLIT)) { err(s, _("Sorry, cannot split this hunk")); } else if (!split_hunk(s, file_diff, hunk - file_diff->hunk)) { color_fprintf_ln(stdout, s->s.header_color, _("Split into %d hunks."), (int)splittable_into); rendered_hunk_index = -1; } } else if (s->answer.buf[0] == 'e') { if (!(permitted & ALLOW_EDIT)) err(s, _("Sorry, cannot edit this hunk")); else if (edit_hunk_loop(s, file_diff, hunk) >= 0) { hunk->use = USE_HUNK; goto soft_increment; } } else if (ch == 'p') { rendered_hunk_index = -1; use_pager = (s->answer.buf[0] == 'P') ? 1 : 0; } else if (s->answer.buf[0] == '?') { const char *p = _(help_patch_remainder), *eol = p; color_fprintf(stdout, s->s.help_color, "%s", _(s->mode->help_patch_text)); /* * Show only those lines of the remainder that are * actually applicable with the current hunk. */ for (; *p; p = eol + (*eol == '\n')) { eol = strchrnul(p, '\n'); /* * `s->buf` still contains the part of the * commands shown in the prompt that are not * always available. */ if (*p != '?' && !strchr(s->buf.buf, *p)) continue; color_fprintf_ln(stdout, s->s.help_color, "%.*s", (int)(eol - p), p); } } else { err(s, _("Unknown command '%s' (use '?' 
for help)"), s->answer.buf); } } /* Any hunk to be used? */ for (i = 0; i < file_diff->hunk_nr; i++) if (file_diff->hunk[i].use == USE_HUNK) break; if (i < file_diff->hunk_nr || (!file_diff->hunk_nr && file_diff->head.use == USE_HUNK)) { /* At least one hunk selected: apply */ strbuf_reset(&s->buf); reassemble_patch(s, file_diff, 0, &s->buf); discard_index(s->s.r->index); if (s->mode->apply_for_checkout) apply_for_checkout(s, &s->buf, s->mode->is_reverse); else { setup_child_process(s, &cp, "apply", NULL); strvec_pushv(&cp.args, s->mode->apply_args); if (pipe_command(&cp, s->buf.buf, s->buf.len, NULL, 0, NULL, 0)) error(_("'git apply' failed")); } if (repo_read_index(s->s.r) >= 0) repo_refresh_and_write_index(s->s.r, REFRESH_QUIET, 0, 1, NULL, NULL, NULL); } putchar('\n'); return quit; } int run_add_p(struct repository *r, enum add_p_mode mode, const char *revision, const struct pathspec *ps) { struct add_p_state s = { { r }, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT }; size_t i, binary_count = 0; init_add_i_state(&s.s, r); if (mode == ADD_P_STASH) s.mode = &patch_mode_stash; else if (mode == ADD_P_RESET) { if (!revision || !strcmp(revision, "HEAD")) s.mode = &patch_mode_reset_head; else s.mode = &patch_mode_reset_nothead; } else if (mode == ADD_P_CHECKOUT) { if (!revision) s.mode = &patch_mode_checkout_index; else if (!strcmp(revision, "HEAD")) s.mode = &patch_mode_checkout_head; else s.mode = &patch_mode_checkout_nothead; } else if (mode == ADD_P_WORKTREE) { if (!revision) s.mode = &patch_mode_checkout_index; else if (!strcmp(revision, "HEAD")) s.mode = &patch_mode_worktree_head; else s.mode = &patch_mode_worktree_nothead; } else s.mode = &patch_mode_add; s.revision = revision; discard_index(r->index); if (repo_read_index(r) < 0 || (!s.mode->index_only && repo_refresh_and_write_index(r, REFRESH_QUIET, 0, 1, NULL, NULL, NULL) < 0) || parse_diff(&s, ps) < 0) { add_p_state_clear(&s); return -1; } for (i = 0; i < s.file_diff_nr; i++) if 
(s.file_diff[i].binary && !s.file_diff[i].hunk_nr) binary_count++; else if (patch_update_file(&s, s.file_diff + i)) break; if (s.file_diff_nr == 0) err(&s, _("No changes.")); else if (binary_count == s.file_diff_nr) err(&s, _("Only binary files changed.")); add_p_state_clear(&s); return 0; } git-cinnabar-0.7.0/git-core/advice.c000064400000000000000000000233361046102023000153050ustar 00000000000000#include "git-compat-util.h" #include "advice.h" #include "config.h" #include "color.h" #include "environment.h" #include "gettext.h" #include "help.h" #include "string-list.h" static int advice_use_color = -1; static char advice_colors[][COLOR_MAXLEN] = { GIT_COLOR_RESET, GIT_COLOR_YELLOW, /* HINT */ }; enum color_advice { ADVICE_COLOR_RESET = 0, ADVICE_COLOR_HINT = 1, }; static int parse_advise_color_slot(const char *slot) { if (!strcasecmp(slot, "reset")) return ADVICE_COLOR_RESET; if (!strcasecmp(slot, "hint")) return ADVICE_COLOR_HINT; return -1; } static const char *advise_get_color(enum color_advice ix) { if (want_color_stderr(advice_use_color)) return advice_colors[ix]; return ""; } enum advice_level { ADVICE_LEVEL_NONE = 0, ADVICE_LEVEL_DISABLED, ADVICE_LEVEL_ENABLED, }; static struct { const char *key; enum advice_level level; } advice_setting[] = { [ADVICE_ADD_EMBEDDED_REPO] = { "addEmbeddedRepo" }, [ADVICE_ADD_EMPTY_PATHSPEC] = { "addEmptyPathspec" }, [ADVICE_ADD_IGNORED_FILE] = { "addIgnoredFile" }, [ADVICE_AMBIGUOUS_FETCH_REFSPEC] = { "ambiguousFetchRefspec" }, [ADVICE_AM_WORK_DIR] = { "amWorkDir" }, [ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME] = { "checkoutAmbiguousRemoteBranchName" }, [ADVICE_COMMIT_BEFORE_MERGE] = { "commitBeforeMerge" }, [ADVICE_DETACHED_HEAD] = { "detachedHead" }, [ADVICE_DIVERGING] = { "diverging" }, [ADVICE_FETCH_SET_HEAD_WARN] = { "fetchRemoteHEADWarn" }, [ADVICE_FETCH_SHOW_FORCED_UPDATES] = { "fetchShowForcedUpdates" }, [ADVICE_FORCE_DELETE_BRANCH] = { "forceDeleteBranch" }, [ADVICE_GRAFT_FILE_DEPRECATED] = { "graftFileDeprecated" 
}, [ADVICE_IGNORED_HOOK] = { "ignoredHook" }, [ADVICE_IMPLICIT_IDENTITY] = { "implicitIdentity" }, [ADVICE_MERGE_CONFLICT] = { "mergeConflict" }, [ADVICE_NAME_TOO_LONG] = { "nameTooLong" }, [ADVICE_NESTED_TAG] = { "nestedTag" }, [ADVICE_OBJECT_NAME_WARNING] = { "objectNameWarning" }, [ADVICE_PUSH_ALREADY_EXISTS] = { "pushAlreadyExists" }, [ADVICE_PUSH_FETCH_FIRST] = { "pushFetchFirst" }, [ADVICE_PUSH_NEEDS_FORCE] = { "pushNeedsForce" }, [ADVICE_PUSH_NON_FF_CURRENT] = { "pushNonFFCurrent" }, [ADVICE_PUSH_NON_FF_MATCHING] = { "pushNonFFMatching" }, [ADVICE_PUSH_REF_NEEDS_UPDATE] = { "pushRefNeedsUpdate" }, [ADVICE_PUSH_UNQUALIFIED_REF_NAME] = { "pushUnqualifiedRefName" }, [ADVICE_PUSH_UPDATE_REJECTED] = { "pushUpdateRejected" }, [ADVICE_PUSH_UPDATE_REJECTED_ALIAS] = { "pushNonFastForward" }, /* backwards compatibility */ [ADVICE_REBASE_TODO_ERROR] = { "rebaseTodoError" }, [ADVICE_REF_SYNTAX] = { "refSyntax" }, [ADVICE_RESET_NO_REFRESH_WARNING] = { "resetNoRefresh" }, [ADVICE_RESOLVE_CONFLICT] = { "resolveConflict" }, [ADVICE_RM_HINTS] = { "rmHints" }, [ADVICE_SEQUENCER_IN_USE] = { "sequencerInUse" }, [ADVICE_SET_UPSTREAM_FAILURE] = { "setUpstreamFailure" }, [ADVICE_SKIPPED_CHERRY_PICKS] = { "skippedCherryPicks" }, [ADVICE_SPARSE_INDEX_EXPANDED] = { "sparseIndexExpanded" }, [ADVICE_STATUS_AHEAD_BEHIND_WARNING] = { "statusAheadBehindWarning" }, [ADVICE_STATUS_HINTS] = { "statusHints" }, [ADVICE_STATUS_U_OPTION] = { "statusUoption" }, [ADVICE_SUBMODULES_NOT_UPDATED] = { "submodulesNotUpdated" }, [ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE] = { "submoduleAlternateErrorStrategyDie" }, [ADVICE_SUBMODULE_MERGE_CONFLICT] = { "submoduleMergeConflict" }, [ADVICE_SUGGEST_DETACHING_HEAD] = { "suggestDetachingHead" }, [ADVICE_UPDATE_SPARSE_PATH] = { "updateSparsePath" }, [ADVICE_USE_CORE_FSMONITOR_CONFIG] = { "useCoreFSMonitorConfig" }, [ADVICE_WAITING_FOR_EDITOR] = { "waitingForEditor" }, [ADVICE_WORKTREE_ADD_ORPHAN] = { "worktreeAddOrphan" }, }; static const char 
turn_off_instructions[] = N_("\n" "Disable this message with \"git config set advice.%s false\""); static void vadvise(const char *advice, int display_instructions, const char *key, va_list params) { struct strbuf buf = STRBUF_INIT; const char *cp, *np; strbuf_vaddf(&buf, advice, params); if (display_instructions) strbuf_addf(&buf, turn_off_instructions, key); for (cp = buf.buf; *cp; cp = np) { np = strchrnul(cp, '\n'); fprintf(stderr, _("%shint:%s%.*s%s\n"), advise_get_color(ADVICE_COLOR_HINT), (np == cp) ? "" : " ", (int)(np - cp), cp, advise_get_color(ADVICE_COLOR_RESET)); if (*np) np++; } strbuf_release(&buf); } void advise(const char *advice, ...) { va_list params; va_start(params, advice); vadvise(advice, 0, "", params); va_end(params); } int advice_enabled(enum advice_type type) { int enabled = advice_setting[type].level != ADVICE_LEVEL_DISABLED; static int globally_enabled = -1; if (globally_enabled < 0) globally_enabled = git_env_bool(GIT_ADVICE_ENVIRONMENT, 1); if (!globally_enabled) return 0; if (type == ADVICE_PUSH_UPDATE_REJECTED) return enabled && advice_enabled(ADVICE_PUSH_UPDATE_REJECTED_ALIAS); return enabled; } void advise_if_enabled(enum advice_type type, const char *advice, ...) 
{ va_list params; if (!advice_enabled(type)) return; va_start(params, advice); vadvise(advice, !advice_setting[type].level, advice_setting[type].key, params); va_end(params); } int git_default_advice_config(const char *var, const char *value) { const char *k, *slot_name; if (!strcmp(var, "color.advice")) { advice_use_color = git_config_colorbool(var, value); return 0; } if (skip_prefix(var, "color.advice.", &slot_name)) { int slot = parse_advise_color_slot(slot_name); if (slot < 0) return 0; if (!value) return config_error_nonbool(var); return color_parse(value, advice_colors[slot]); } if (!skip_prefix(var, "advice.", &k)) return 0; for (size_t i = 0; i < ARRAY_SIZE(advice_setting); i++) { if (strcasecmp(k, advice_setting[i].key)) continue; advice_setting[i].level = git_config_bool(var, value) ? ADVICE_LEVEL_ENABLED : ADVICE_LEVEL_DISABLED; return 0; } return 0; } void list_config_advices(struct string_list *list, const char *prefix) { for (size_t i = 0; i < ARRAY_SIZE(advice_setting); i++) list_config_item(list, prefix, advice_setting[i].key); } int error_resolve_conflict(const char *me) { if (!strcmp(me, "cherry-pick")) error(_("Cherry-picking is not possible because you have unmerged files.")); else if (!strcmp(me, "commit")) error(_("Committing is not possible because you have unmerged files.")); else if (!strcmp(me, "merge")) error(_("Merging is not possible because you have unmerged files.")); else if (!strcmp(me, "pull")) error(_("Pulling is not possible because you have unmerged files.")); else if (!strcmp(me, "revert")) error(_("Reverting is not possible because you have unmerged files.")); else if (!strcmp(me, "rebase")) error(_("Rebasing is not possible because you have unmerged files.")); else BUG("Unhandled conflict reason '%s'", me); if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) /* * Message used both when 'git commit' fails and when * other commands doing a merge do. 
*/ advise(_("Fix them up in the work tree, and then use 'git add/rm '\n" "as appropriate to mark resolution and make a commit.")); return -1; } void NORETURN die_resolve_conflict(const char *me) { error_resolve_conflict(me); die(_("Exiting because of an unresolved conflict.")); } void NORETURN die_conclude_merge(void) { error(_("You have not concluded your merge (MERGE_HEAD exists).")); if (advice_enabled(ADVICE_RESOLVE_CONFLICT)) advise(_("Please, commit your changes before merging.")); die(_("Exiting because of unfinished merge.")); } void NORETURN die_ff_impossible(void) { advise_if_enabled(ADVICE_DIVERGING, _("Diverging branches can't be fast-forwarded, you need to either:\n" "\n" "\tgit merge --no-ff\n" "\n" "or:\n" "\n" "\tgit rebase\n")); die(_("Not possible to fast-forward, aborting.")); } void advise_on_updating_sparse_paths(struct string_list *pathspec_list) { struct string_list_item *item; if (!pathspec_list->nr) return; fprintf(stderr, _("The following paths and/or pathspecs matched paths that exist\n" "outside of your sparse-checkout definition, so will not be\n" "updated in the index:\n")); for_each_string_list_item(item, pathspec_list) fprintf(stderr, "%s\n", item->string); advise_if_enabled(ADVICE_UPDATE_SPARSE_PATH, _("If you intend to update such entries, try one of the following:\n" "* Use the --sparse option.\n" "* Disable or modify the sparsity rules.")); } void detach_advice(const char *new_name) { const char *fmt = _("Note: switching to '%s'.\n" "\n" "You are in 'detached HEAD' state. You can look around, make experimental\n" "changes and commit them, and you can discard any commits you make in this\n" "state without impacting any branches by switching back to a branch.\n" "\n" "If you want to create a new branch to retain commits you create, you may\n" "do so (now or later) by using -c with the switch command. 
Example:\n" "\n" " git switch -c \n" "\n" "Or undo this operation with:\n" "\n" " git switch -\n" "\n" "Turn off this advice by setting config variable advice.detachedHead to false\n\n"); fprintf(stderr, fmt, new_name); } void advise_on_moving_dirty_path(struct string_list *pathspec_list) { struct string_list_item *item; if (!pathspec_list->nr) return; fprintf(stderr, _("The following paths have been moved outside the\n" "sparse-checkout definition but are not sparse due to local\n" "modifications.\n")); for_each_string_list_item(item, pathspec_list) fprintf(stderr, "%s\n", item->string); advise_if_enabled(ADVICE_UPDATE_SPARSE_PATH, _("To correct the sparsity of these paths, do the following:\n" "* Use \"git add --sparse \" to update the index\n" "* Use \"git sparse-checkout reapply\" to apply the sparsity rules")); } git-cinnabar-0.7.0/git-core/advice.h000064400000000000000000000047731046102023000153160ustar 00000000000000#ifndef ADVICE_H #define ADVICE_H struct string_list; /* * To add a new advice, you need to: * Define a new advice_type. * Add a new entry to advice_setting array. * Add the new config variable to Documentation/config/advice.txt. * Call advise_if_enabled to print your advice. 
*/ enum advice_type { ADVICE_ADD_EMBEDDED_REPO, ADVICE_ADD_EMPTY_PATHSPEC, ADVICE_ADD_IGNORED_FILE, ADVICE_AMBIGUOUS_FETCH_REFSPEC, ADVICE_AM_WORK_DIR, ADVICE_CHECKOUT_AMBIGUOUS_REMOTE_BRANCH_NAME, ADVICE_COMMIT_BEFORE_MERGE, ADVICE_DETACHED_HEAD, ADVICE_DIVERGING, ADVICE_FETCH_SET_HEAD_WARN, ADVICE_FETCH_SHOW_FORCED_UPDATES, ADVICE_FORCE_DELETE_BRANCH, ADVICE_GRAFT_FILE_DEPRECATED, ADVICE_IGNORED_HOOK, ADVICE_IMPLICIT_IDENTITY, ADVICE_MERGE_CONFLICT, ADVICE_NAME_TOO_LONG, ADVICE_NESTED_TAG, ADVICE_OBJECT_NAME_WARNING, ADVICE_PUSH_ALREADY_EXISTS, ADVICE_PUSH_FETCH_FIRST, ADVICE_PUSH_NEEDS_FORCE, ADVICE_PUSH_NON_FF_CURRENT, ADVICE_PUSH_NON_FF_MATCHING, ADVICE_PUSH_REF_NEEDS_UPDATE, ADVICE_PUSH_UNQUALIFIED_REF_NAME, ADVICE_PUSH_UPDATE_REJECTED, ADVICE_PUSH_UPDATE_REJECTED_ALIAS, ADVICE_REBASE_TODO_ERROR, ADVICE_REF_SYNTAX, ADVICE_RESET_NO_REFRESH_WARNING, ADVICE_RESOLVE_CONFLICT, ADVICE_RM_HINTS, ADVICE_SEQUENCER_IN_USE, ADVICE_SET_UPSTREAM_FAILURE, ADVICE_SKIPPED_CHERRY_PICKS, ADVICE_SPARSE_INDEX_EXPANDED, ADVICE_STATUS_AHEAD_BEHIND_WARNING, ADVICE_STATUS_HINTS, ADVICE_STATUS_U_OPTION, ADVICE_SUBMODULES_NOT_UPDATED, ADVICE_SUBMODULE_ALTERNATE_ERROR_STRATEGY_DIE, ADVICE_SUBMODULE_MERGE_CONFLICT, ADVICE_SUGGEST_DETACHING_HEAD, ADVICE_UPDATE_SPARSE_PATH, ADVICE_USE_CORE_FSMONITOR_CONFIG, ADVICE_WAITING_FOR_EDITOR, ADVICE_WORKTREE_ADD_ORPHAN, }; int git_default_advice_config(const char *var, const char *value); __attribute__((format (printf, 1, 2))) void advise(const char *advice, ...); /** * Checks if advice type is enabled (can be printed to the user). * Should be called before advise(). */ int advice_enabled(enum advice_type type); /** * Checks the visibility of the advice before printing. 
*/ __attribute__((format (printf, 2, 3))) void advise_if_enabled(enum advice_type type, const char *advice, ...); int error_resolve_conflict(const char *me); void NORETURN die_resolve_conflict(const char *me); void NORETURN die_conclude_merge(void); void NORETURN die_ff_impossible(void); void advise_on_updating_sparse_paths(struct string_list *pathspec_list); void detach_advice(const char *new_name); void advise_on_moving_dirty_path(struct string_list *pathspec_list); #endif /* ADVICE_H */ git-cinnabar-0.7.0/git-core/alias.c000064400000000000000000000054501046102023000151400ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "git-compat-util.h" #include "alias.h" #include "config.h" #include "gettext.h" #include "strbuf.h" #include "string-list.h" struct config_alias_data { const char *alias; char *v; struct string_list *list; }; static int config_alias_cb(const char *key, const char *value, const struct config_context *ctx UNUSED, void *d) { struct config_alias_data *data = d; const char *p; if (!skip_prefix(key, "alias.", &p)) return 0; if (data->alias) { if (!strcasecmp(p, data->alias)) { FREE_AND_NULL(data->v); return git_config_string(&data->v, key, value); } } else if (data->list) { string_list_append(data->list, p); } return 0; } char *alias_lookup(const char *alias) { struct config_alias_data data = { alias, NULL }; read_early_config(the_repository, config_alias_cb, &data); return data.v; } void list_aliases(struct string_list *list) { struct config_alias_data data = { NULL, NULL, list }; read_early_config(the_repository, config_alias_cb, &data); } void quote_cmdline(struct strbuf *buf, const char **argv) { for (const char **argp = argv; *argp; argp++) { if (argp != argv) strbuf_addch(buf, ' '); strbuf_addch(buf, '"'); for (const char *p = *argp; *p; p++) { const char c = *p; if (c == '"' || c =='\\') strbuf_addch(buf, '\\'); strbuf_addch(buf, c); } strbuf_addch(buf, '"'); } } #define SPLIT_CMDLINE_BAD_ENDING 1 #define 
SPLIT_CMDLINE_UNCLOSED_QUOTE 2 #define SPLIT_CMDLINE_ARGC_OVERFLOW 3 static const char *split_cmdline_errors[] = { N_("cmdline ends with \\"), N_("unclosed quote"), N_("too many arguments"), }; int split_cmdline(char *cmdline, const char ***argv) { size_t src, dst, count = 0, size = 16; char quoted = 0; ALLOC_ARRAY(*argv, size); /* split alias_string */ (*argv)[count++] = cmdline; for (src = dst = 0; cmdline[src];) { char c = cmdline[src]; if (!quoted && isspace(c)) { cmdline[dst++] = 0; while (cmdline[++src] && isspace(cmdline[src])) ; /* skip */ ALLOC_GROW(*argv, count + 1, size); (*argv)[count++] = cmdline + dst; } else if (!quoted && (c == '\'' || c == '"')) { quoted = c; src++; } else if (c == quoted) { quoted = 0; src++; } else { if (c == '\\' && quoted != '\'') { src++; c = cmdline[src]; if (!c) { FREE_AND_NULL(*argv); return -SPLIT_CMDLINE_BAD_ENDING; } } cmdline[dst++] = c; src++; } } cmdline[dst] = 0; if (quoted) { FREE_AND_NULL(*argv); return -SPLIT_CMDLINE_UNCLOSED_QUOTE; } if (count >= INT_MAX) { FREE_AND_NULL(*argv); return -SPLIT_CMDLINE_ARGC_OVERFLOW; } ALLOC_GROW(*argv, count + 1, size); (*argv)[count] = NULL; return count; } const char *split_cmdline_strerror(int split_cmdline_errno) { return split_cmdline_errors[-split_cmdline_errno - 1]; } git-cinnabar-0.7.0/git-core/alias.h000064400000000000000000000006711046102023000151450ustar 00000000000000#ifndef ALIAS_H #define ALIAS_H struct strbuf; struct string_list; char *alias_lookup(const char *alias); /* Quote argv so buf can be parsed by split_cmdline() */ void quote_cmdline(struct strbuf *buf, const char **argv); int split_cmdline(char *cmdline, const char ***argv); /* Takes a negative value returned by split_cmdline */ const char *split_cmdline_strerror(int cmdline_errno); void list_aliases(struct string_list *list); #endif git-cinnabar-0.7.0/git-core/alloc.c000064400000000000000000000053301046102023000151360ustar 00000000000000/* * alloc.c - specialized allocator for internal objects * * 
Copyright (C) 2006 Linus Torvalds * * The standard malloc/free wastes too much space for objects, partly because * it maintains all the allocation infrastructure, but even more because it ends * up with maximal alignment because it doesn't know what the object alignment * for the new allocation is. */ #include "git-compat-util.h" #include "object.h" #include "blob.h" #include "tree.h" #include "commit.h" #include "repository.h" #include "tag.h" #include "alloc.h" #define BLOCKING 1024 union any_object { struct object object; struct blob blob; struct tree tree; struct commit commit; struct tag tag; }; struct alloc_state { int nr; /* number of nodes left in current allocation */ void *p; /* first free node in current allocation */ /* bookkeeping of allocations */ void **slabs; int slab_nr, slab_alloc; }; struct alloc_state *allocate_alloc_state(void) { return xcalloc(1, sizeof(struct alloc_state)); } void clear_alloc_state(struct alloc_state *s) { while (s->slab_nr > 0) { s->slab_nr--; free(s->slabs[s->slab_nr]); } FREE_AND_NULL(s->slabs); } static inline void *alloc_node(struct alloc_state *s, size_t node_size) { void *ret; if (!s->nr) { s->nr = BLOCKING; s->p = xmalloc(BLOCKING * node_size); ALLOC_GROW(s->slabs, s->slab_nr + 1, s->slab_alloc); s->slabs[s->slab_nr++] = s->p; } s->nr--; ret = s->p; s->p = (char *)s->p + node_size; memset(ret, 0, node_size); return ret; } void *alloc_blob_node(struct repository *r) { struct blob *b = alloc_node(r->parsed_objects->blob_state, sizeof(struct blob)); b->object.type = OBJ_BLOB; return b; } void *alloc_tree_node(struct repository *r) { struct tree *t = alloc_node(r->parsed_objects->tree_state, sizeof(struct tree)); t->object.type = OBJ_TREE; return t; } void *alloc_tag_node(struct repository *r) { struct tag *t = alloc_node(r->parsed_objects->tag_state, sizeof(struct tag)); t->object.type = OBJ_TAG; return t; } void *alloc_object_node(struct repository *r) { struct object *obj = alloc_node(r->parsed_objects->object_state, 
sizeof(union any_object)); obj->type = OBJ_NONE; return obj; } /* * The returned count is to be used as an index into commit slabs, * that are *NOT* maintained per repository, and that is why a single * global counter is used. */ static unsigned int alloc_commit_index(void) { static unsigned int parsed_commits_count; return parsed_commits_count++; } void init_commit_node(struct commit *c) { c->object.type = OBJ_COMMIT; c->index = alloc_commit_index(); } void *alloc_commit_node(struct repository *r) { struct commit *c = alloc_node(r->parsed_objects->commit_state, sizeof(struct commit)); init_commit_node(c); return c; } git-cinnabar-0.7.0/git-core/alloc.h000064400000000000000000000007461046102023000151510ustar 00000000000000#ifndef ALLOC_H #define ALLOC_H struct alloc_state; struct tree; struct commit; struct tag; struct repository; void *alloc_blob_node(struct repository *r); void *alloc_tree_node(struct repository *r); void init_commit_node(struct commit *c); void *alloc_commit_node(struct repository *r); void *alloc_tag_node(struct repository *r); void *alloc_object_node(struct repository *r); struct alloc_state *allocate_alloc_state(void); void clear_alloc_state(struct alloc_state *s); #endif git-cinnabar-0.7.0/git-core/apply.c000064400000000000000000004152501046102023000151770ustar 00000000000000/* * apply.c * * Copyright (C) Linus Torvalds, 2005 * * This applies patches on top of some (arbitrary) version of the SCM. 
* */ #define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "abspath.h" #include "base85.h" #include "config.h" #include "object-store-ll.h" #include "delta.h" #include "diff.h" #include "dir.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "xdiff-interface.h" #include "merge-ll.h" #include "lockfile.h" #include "name-hash.h" #include "object-name.h" #include "object-file.h" #include "parse-options.h" #include "path.h" #include "quote.h" #include "read-cache.h" #include "repository.h" #include "rerere.h" #include "apply.h" #include "entry.h" #include "setup.h" #include "symlinks.h" #include "wildmatch.h" #include "ws.h" struct gitdiff_data { struct strbuf *root; int linenr; int p_value; }; static void git_apply_config(void) { git_config_get_string("apply.whitespace", &apply_default_whitespace); git_config_get_string("apply.ignorewhitespace", &apply_default_ignorewhitespace); git_config(git_xmerge_config, NULL); } static int parse_whitespace_option(struct apply_state *state, const char *option) { if (!option) { state->ws_error_action = warn_on_ws_error; return 0; } if (!strcmp(option, "warn")) { state->ws_error_action = warn_on_ws_error; return 0; } if (!strcmp(option, "nowarn")) { state->ws_error_action = nowarn_ws_error; return 0; } if (!strcmp(option, "error")) { state->ws_error_action = die_on_ws_error; return 0; } if (!strcmp(option, "error-all")) { state->ws_error_action = die_on_ws_error; state->squelch_whitespace_errors = 0; return 0; } if (!strcmp(option, "strip") || !strcmp(option, "fix")) { state->ws_error_action = correct_ws_error; return 0; } /* * Please update $__git_whitespacelist in git-completion.bash, * Documentation/git-apply.txt, and Documentation/git-am.txt * when you add new options. 
*/ return error(_("unrecognized whitespace option '%s'"), option); } static int parse_ignorewhitespace_option(struct apply_state *state, const char *option) { if (!option || !strcmp(option, "no") || !strcmp(option, "false") || !strcmp(option, "never") || !strcmp(option, "none")) { state->ws_ignore_action = ignore_ws_none; return 0; } if (!strcmp(option, "change")) { state->ws_ignore_action = ignore_ws_change; return 0; } return error(_("unrecognized whitespace ignore option '%s'"), option); } int init_apply_state(struct apply_state *state, struct repository *repo, const char *prefix) { memset(state, 0, sizeof(*state)); state->prefix = prefix; state->repo = repo; state->apply = 1; state->line_termination = '\n'; state->p_value = 1; state->p_context = UINT_MAX; state->squelch_whitespace_errors = 5; state->ws_error_action = warn_on_ws_error; state->ws_ignore_action = ignore_ws_none; state->linenr = 1; string_list_init_nodup(&state->fn_table); string_list_init_nodup(&state->limit_by_name); strset_init(&state->removed_symlinks); strset_init(&state->kept_symlinks); strbuf_init(&state->root, 0); git_apply_config(); if (apply_default_whitespace && parse_whitespace_option(state, apply_default_whitespace)) return -1; if (apply_default_ignorewhitespace && parse_ignorewhitespace_option(state, apply_default_ignorewhitespace)) return -1; return 0; } void clear_apply_state(struct apply_state *state) { string_list_clear(&state->limit_by_name, 0); strset_clear(&state->removed_symlinks); strset_clear(&state->kept_symlinks); strbuf_release(&state->root); FREE_AND_NULL(state->fake_ancestor); /* &state->fn_table is cleared at the end of apply_patch() */ } static void mute_routine(const char *msg UNUSED, va_list params UNUSED) { /* do nothing */ } int check_apply_state(struct apply_state *state, int force_apply) { int is_not_gitdir = !startup_info->have_repository; if (state->apply_with_reject && state->threeway) return error(_("options '%s' and '%s' cannot be used together"), 
"--reject", "--3way");
	if (state->threeway) {
		/* --3way needs the index/objects, hence a repository. */
		if (is_not_gitdir)
			return error(_("'%s' outside a repository"), "--3way");
		state->check_index = 1;
	}
	if (state->apply_with_reject) {
		/* --reject implies applying, and bumps quiet runs to verbose. */
		state->apply = 1;
		if (state->apply_verbosity == verbosity_normal)
			state->apply_verbosity = verbosity_verbose;
	}
	/*
	 * Pure inspection modes (diffstat/numstat/summary/check/
	 * build-fake-ancestor) disable actual application unless the
	 * caller forces it.
	 */
	if (!force_apply && (state->diffstat || state->numstat || state->summary || state->check || state->fake_ancestor))
		state->apply = 0;
	if (state->check_index && is_not_gitdir)
		return error(_("'%s' outside a repository"), "--index");
	if (state->cached) {
		if (is_not_gitdir)
			return error(_("'%s' outside a repository"), "--cached");
		/* --cached implies consulting the index. */
		state->check_index = 1;
	}
	/* intent-to-add only makes sense for a worktree-only apply. */
	if (state->ita_only && (state->check_index || is_not_gitdir))
		state->ita_only = 0;
	/* Index-checked applies never need the unsafe-paths escape hatch. */
	if (state->check_index)
		state->unsafe_paths = 0;

	/*
	 * In silent mode, mute error/warning output but remember the
	 * previous routines so they can be restored later.
	 */
	if (state->apply_verbosity <= verbosity_silent) {
		state->saved_error_routine = get_error_routine();
		state->saved_warn_routine = get_warn_routine();
		set_error_routine(mute_routine);
		set_warn_routine(mute_routine);
	}

	return 0;
}

/*
 * Pick the fallback whitespace handling when neither the command line
 * nor apply.whitespace configured one: warn when actually applying,
 * stay quiet for inspection-only runs.
 */
static void set_default_whitespace_mode(struct apply_state *state)
{
	if (!state->whitespace_option && !apply_default_whitespace)
		state->ws_error_action = (state->apply ? warn_on_ws_error : nowarn_ws_error);
}

/*
 * This represents one "hunk" from a patch, starting with
 * "@@ -oldpos,oldlines +newpos,newlines @@" marker. The
 * patch text is pointed at by patch, and its byte length
 * is stored in size. leading and trailing are the number
 * of context lines.
 */
struct fragment {
	unsigned long leading, trailing;
	unsigned long oldpos, oldlines;
	unsigned long newpos, newlines;
	/*
	 * 'patch' is usually borrowed from buf in apply_patch(),
	 * but some codepaths store an allocated buffer.
	 */
	const char *patch;
	unsigned free_patch:1,	/* set when 'patch' is owned and must be freed */
		 rejected:1;	/* set when this hunk failed to apply */
	int size;
	int linenr;
	struct fragment *next;
};

/*
 * When dealing with a binary patch, we reuse "leading" field
 * to store the type of the binary hunk, either deflated "delta"
 * or deflated "literal".
*/
#define binary_patch_method leading
#define BINARY_DELTA_DEFLATED	1
#define BINARY_LITERAL_DEFLATED 2

/*
 * Free a linked list of fragments; 'patch' buffers are released only
 * when the fragment owns them (free_patch bit set).
 */
static void free_fragment_list(struct fragment *list)
{
	while (list) {
		struct fragment *next = list->next;
		if (list->free_patch)
			free((char *)list->patch);
		free(list);
		list = next;
	}
}

/* Release everything a patch owns, but not the patch struct itself. */
void release_patch(struct patch *patch)
{
	free_fragment_list(patch->fragments);
	free(patch->def_name);
	free(patch->old_name);
	free(patch->new_name);
	free(patch->result);
}

/* Release a patch's contents and the struct itself. */
static void free_patch(struct patch *patch)
{
	release_patch(patch);
	free(patch);
}

/* Free a whole linked list of patches. */
static void free_patch_list(struct patch *list)
{
	while (list) {
		struct patch *next = list->next;
		free_patch(list);
		list = next;
	}
}

/*
 * A line in a file, len-bytes long (includes the terminating LF,
 * except for an incomplete line at the end if the file ends with
 * one), and its contents hashes to 'hash'.
 */
struct line {
	size_t len;
	unsigned hash : 24;
	unsigned flag : 8;
#define LINE_COMMON	1
#define LINE_PATCHED	2
};

/*
 * This represents a "file", which is an array of "lines".
 */
struct image {
	struct strbuf buf;
	struct line *line;
	size_t line_nr, line_alloc;
};
#define IMAGE_INIT { \
	.buf = STRBUF_INIT, \
}

/* Reset an image to the empty state (does not free anything). */
static void image_init(struct image *image)
{
	struct image empty = IMAGE_INIT;
	memcpy(image, &empty, sizeof(*image));
}

/* Free an image's buffer and line table, then reinitialize it. */
static void image_clear(struct image *image)
{
	strbuf_release(&image->buf);
	free(image->line);
	image_init(image);
}

/*
 * Hash a line's contents, skipping all whitespace bytes so that
 * lines differing only in whitespace hash the same.
 */
static uint32_t hash_line(const char *cp, size_t len)
{
	size_t i;
	uint32_t h;
	for (i = 0, h = 0; i < len; i++) {
		if (!isspace(cp[i])) {
			h = h * 3 + (cp[i] & 0xff);
		}
	}
	return h;
}

/* Append one line record (length, hash, flags) to an image's line table. */
static void image_add_line(struct image *img, const char *bol, size_t len, unsigned flag)
{
	ALLOC_GROW(img->line, img->line_nr + 1, img->line_alloc);
	img->line[img->line_nr].len = len;
	img->line[img->line_nr].hash = hash_line(bol, len);
	img->line[img->line_nr].flag = flag;
	img->line_nr++;
}

/*
 * "buf" has the file contents to be patched (read from various sources).
* attach it to "image" and add line-based index to it. * "image" now owns the "buf". */ static void image_prepare(struct image *image, char *buf, size_t len, int prepare_linetable) { const char *cp, *ep; image_clear(image); strbuf_attach(&image->buf, buf, len, len + 1); if (!prepare_linetable) return; ep = image->buf.buf + image->buf.len; cp = image->buf.buf; while (cp < ep) { const char *next; for (next = cp; next < ep && *next != '\n'; next++) ; if (next < ep) next++; image_add_line(image, cp, next - cp, 0); cp = next; } } static void image_remove_first_line(struct image *img) { strbuf_remove(&img->buf, 0, img->line[0].len); img->line_nr--; if (img->line_nr) MOVE_ARRAY(img->line, img->line + 1, img->line_nr); } static void image_remove_last_line(struct image *img) { size_t last_line_len = img->line[img->line_nr - 1].len; strbuf_setlen(&img->buf, img->buf.len - last_line_len); img->line_nr--; } /* fmt must contain _one_ %s and no other substitution */ static void say_patch_name(FILE *output, const char *fmt, struct patch *patch) { struct strbuf sb = STRBUF_INIT; if (patch->old_name && patch->new_name && strcmp(patch->old_name, patch->new_name)) { quote_c_style(patch->old_name, &sb, NULL, 0); strbuf_addstr(&sb, " => "); quote_c_style(patch->new_name, &sb, NULL, 0); } else { const char *n = patch->new_name; if (!n) n = patch->old_name; quote_c_style(n, &sb, NULL, 0); } fprintf(output, fmt, sb.buf); fputc('\n', output); strbuf_release(&sb); } #define SLOP (16) /* * apply.c isn't equipped to handle arbitrarily large patches, because * it intermingles `unsigned long` with `int` for the type used to store * buffer lengths. * * Only process patches that are just shy of 1 GiB large in order to * avoid any truncation or overflow issues. 
*/ #define MAX_APPLY_SIZE (1024UL * 1024 * 1023) static int read_patch_file(struct strbuf *sb, int fd) { if (strbuf_read(sb, fd, 0) < 0) return error_errno(_("failed to read patch")); else if (sb->len >= MAX_APPLY_SIZE) return error(_("patch too large")); /* * Make sure that we have some slop in the buffer * so that we can do speculative "memcmp" etc, and * see to it that it is NUL-filled. */ strbuf_grow(sb, SLOP); memset(sb->buf + sb->len, 0, SLOP); return 0; } static unsigned long linelen(const char *buffer, unsigned long size) { unsigned long len = 0; while (size--) { len++; if (*buffer++ == '\n') break; } return len; } static int is_dev_null(const char *str) { return skip_prefix(str, "/dev/null", &str) && isspace(*str); } #define TERM_SPACE 1 #define TERM_TAB 2 static int name_terminate(int c, int terminate) { if (c == ' ' && !(terminate & TERM_SPACE)) return 0; if (c == '\t' && !(terminate & TERM_TAB)) return 0; return 1; } /* remove double slashes to make --index work with such filenames */ static char *squash_slash(char *name) { int i = 0, j = 0; if (!name) return NULL; while (name[i]) { if ((name[j++] = name[i++]) == '/') while (name[i] == '/') i++; } name[j] = '\0'; return name; } static char *find_name_gnu(struct strbuf *root, const char *line, int p_value) { struct strbuf name = STRBUF_INIT; char *cp; /* * Proposed "new-style" GNU patch/diff format; see * https://lore.kernel.org/git/7vll0wvb2a.fsf@assigned-by-dhcp.cox.net/ */ if (unquote_c_style(&name, line, NULL)) { strbuf_release(&name); return NULL; } for (cp = name.buf; p_value; p_value--) { cp = strchr(cp, '/'); if (!cp) { strbuf_release(&name); return NULL; } cp++; } strbuf_remove(&name, 0, cp - name.buf); if (root->len) strbuf_insert(&name, 0, root->buf, root->len); return squash_slash(strbuf_detach(&name, NULL)); } static size_t sane_tz_len(const char *line, size_t len) { const char *tz, *p; if (len < strlen(" +0500") || line[len-strlen(" +0500")] != ' ') return 0; tz = line + len - strlen(" 
+0500"); if (tz[1] != '+' && tz[1] != '-') return 0; for (p = tz + 2; p != line + len; p++) if (!isdigit(*p)) return 0; return line + len - tz; } static size_t tz_with_colon_len(const char *line, size_t len) { const char *tz, *p; if (len < strlen(" +08:00") || line[len - strlen(":00")] != ':') return 0; tz = line + len - strlen(" +08:00"); if (tz[0] != ' ' || (tz[1] != '+' && tz[1] != '-')) return 0; p = tz + 2; if (!isdigit(*p++) || !isdigit(*p++) || *p++ != ':' || !isdigit(*p++) || !isdigit(*p++)) return 0; return line + len - tz; } static size_t date_len(const char *line, size_t len) { const char *date, *p; if (len < strlen("72-02-05") || line[len-strlen("-05")] != '-') return 0; p = date = line + len - strlen("72-02-05"); if (!isdigit(*p++) || !isdigit(*p++) || *p++ != '-' || !isdigit(*p++) || !isdigit(*p++) || *p++ != '-' || !isdigit(*p++) || !isdigit(*p++)) /* Not a date. */ return 0; if (date - line >= strlen("19") && isdigit(date[-1]) && isdigit(date[-2])) /* 4-digit year */ date -= strlen("19"); return line + len - date; } static size_t short_time_len(const char *line, size_t len) { const char *time, *p; if (len < strlen(" 07:01:32") || line[len-strlen(":32")] != ':') return 0; p = time = line + len - strlen(" 07:01:32"); /* Permit 1-digit hours? */ if (*p++ != ' ' || !isdigit(*p++) || !isdigit(*p++) || *p++ != ':' || !isdigit(*p++) || !isdigit(*p++) || *p++ != ':' || !isdigit(*p++) || !isdigit(*p++)) /* Not a time. */ return 0; return line + len - time; } static size_t fractional_time_len(const char *line, size_t len) { const char *p; size_t n; /* Expected format: 19:41:17.620000023 */ if (!len || !isdigit(line[len - 1])) return 0; p = line + len - 1; /* Fractional seconds. */ while (p > line && isdigit(*p)) p--; if (*p != '.') return 0; /* Hours, minutes, and whole seconds. 
*/ n = short_time_len(line, p - line); if (!n) return 0; return line + len - p + n; } static size_t trailing_spaces_len(const char *line, size_t len) { const char *p; /* Expected format: ' ' x (1 or more) */ if (!len || line[len - 1] != ' ') return 0; p = line + len; while (p != line) { p--; if (*p != ' ') return line + len - (p + 1); } /* All spaces! */ return len; } static size_t diff_timestamp_len(const char *line, size_t len) { const char *end = line + len; size_t n; /* * Posix: 2010-07-05 19:41:17 * GNU: 2010-07-05 19:41:17.620000023 -0500 */ if (!isdigit(end[-1])) return 0; n = sane_tz_len(line, end - line); if (!n) n = tz_with_colon_len(line, end - line); end -= n; n = short_time_len(line, end - line); if (!n) n = fractional_time_len(line, end - line); end -= n; n = date_len(line, end - line); if (!n) /* No date. Too bad. */ return 0; end -= n; if (end == line) /* No space before date. */ return 0; if (end[-1] == '\t') { /* Success! */ end--; return line + len - end; } if (end[-1] != ' ') /* No space before date. */ return 0; /* Whitespace damage. */ end -= trailing_spaces_len(line, end - line); return line + len - end; } static char *find_name_common(struct strbuf *root, const char *line, const char *def, int p_value, const char *end, int terminate) { int len; const char *start = NULL; if (p_value == 0) start = line; while (line != end) { char c = *line; if (!end && isspace(c)) { if (c == '\n') break; if (name_terminate(c, terminate)) break; } line++; if (c == '/' && !--p_value) start = line; } if (!start) return squash_slash(xstrdup_or_null(def)); len = line - start; if (!len) return squash_slash(xstrdup_or_null(def)); /* * Generally we prefer the shorter name, especially * if the other one is just a variation of that with * something else tacked on to the end (ie "file.orig" * or "file~"). 
*/ if (def) { int deflen = strlen(def); if (deflen < len && !strncmp(start, def, deflen)) return squash_slash(xstrdup(def)); } if (root->len) { char *ret = xstrfmt("%s%.*s", root->buf, len, start); return squash_slash(ret); } return squash_slash(xmemdupz(start, len)); } static char *find_name(struct strbuf *root, const char *line, char *def, int p_value, int terminate) { if (*line == '"') { char *name = find_name_gnu(root, line, p_value); if (name) return name; } return find_name_common(root, line, def, p_value, NULL, terminate); } static char *find_name_traditional(struct strbuf *root, const char *line, char *def, int p_value) { size_t len; size_t date_len; if (*line == '"') { char *name = find_name_gnu(root, line, p_value); if (name) return name; } len = strchrnul(line, '\n') - line; date_len = diff_timestamp_len(line, len); if (!date_len) return find_name_common(root, line, def, p_value, NULL, TERM_TAB); len -= date_len; return find_name_common(root, line, def, p_value, line + len, 0); } /* * Given the string after "--- " or "+++ ", guess the appropriate * p_value for the given patch. */ static int guess_p_value(struct apply_state *state, const char *nameline) { char *name, *cp; int val = -1; if (is_dev_null(nameline)) return -1; name = find_name_traditional(&state->root, nameline, NULL, 0); if (!name) return -1; cp = strchr(name, '/'); if (!cp) val = 0; else if (state->prefix) { /* * Does it begin with "a/$our-prefix" and such? Then this is * very likely to apply to our directory. */ if (starts_with(name, state->prefix)) val = count_slashes(state->prefix); else { cp++; if (starts_with(cp, state->prefix)) val = count_slashes(state->prefix) + 1; } } free(name); return val; } /* * Does the ---/+++ line have the POSIX timestamp after the last HT? * GNU diff puts epoch there to signal a creation/deletion event. Is * this such a timestamp? 
*/
static int has_epoch_timestamp(const char *nameline)
{
	/*
	 * We are only interested in epoch timestamp; any non-zero
	 * fraction cannot be one, hence "(\.0+)?" in the regexp below.
	 * For the same reason, the date must be either 1969-12-31 or
	 * 1970-01-01, and the seconds part must be "00".
	 */
	const char stamp_regexp[] =
		"^[0-2][0-9]:([0-5][0-9]):00(\\.0+)?"
		" "
		"([-+][0-2][0-9]:?[0-5][0-9])\n";
	const char *timestamp = NULL, *cp, *colon;
	static regex_t *stamp;
	regmatch_t m[10];
	int zoneoffset, epoch_hour, hour, minute;
	int status;

	/* The timestamp, if any, follows the last HT on the name line. */
	for (cp = nameline; *cp != '\n'; cp++) {
		if (*cp == '\t')
			timestamp = cp + 1;
	}
	if (!timestamp)
		return 0;

	/*
	 * YYYY-MM-DD hh:mm:ss must be from either 1969-12-31
	 * (west of GMT) or 1970-01-01 (east of GMT)
	 */
	if (skip_prefix(timestamp, "1969-12-31 ", &timestamp))
		epoch_hour = 24;
	else if (skip_prefix(timestamp, "1970-01-01 ", &timestamp))
		epoch_hour = 0;
	else
		return 0;

	/* Compile the regexp once and keep it for later calls. */
	if (!stamp) {
		stamp = xmalloc(sizeof(*stamp));
		if (regcomp(stamp, stamp_regexp, REG_EXTENDED)) {
			warning(_("Cannot prepare timestamp regexp %s"),
				stamp_regexp);
			return 0;
		}
	}

	status = regexec(stamp, timestamp, ARRAY_SIZE(m), m, 0);
	if (status) {
		if (status != REG_NOMATCH)
			warning(_("regexec returned %d for input: %s"),
				status, timestamp);
		return 0;
	}

	hour = strtol(timestamp, NULL, 10);
	minute = strtol(timestamp + m[1].rm_so, NULL, 10);

	/*
	 * Accept zone offsets with or without a colon ("+0500" or
	 * "+05:00") and convert to minutes east of GMT.
	 */
	zoneoffset = strtol(timestamp + m[3].rm_so + 1, (char **) &colon, 10);
	if (*colon == ':')
		zoneoffset = zoneoffset * 60 + strtol(colon + 1, NULL, 10);
	else
		zoneoffset = (zoneoffset / 100) * 60 + (zoneoffset % 100);
	if (timestamp[m[3].rm_so] == '-')
		zoneoffset = -zoneoffset;

	/* Local time minus the zone offset must land exactly on the epoch. */
	return hour * 60 + minute - zoneoffset == epoch_hour * 60;
}

/*
 * Get the name etc info from the ---/+++ lines of a traditional patch header
 *
 * FIXME! The end-of-filename heuristics are kind of screwy. For existing
 * files, we can happily check the index for a match, but for creating a
 * new file we should try to match whatever "patch" does. I have no idea.
*/
static int parse_traditional_patch(struct apply_state *state,
				   const char *first,
				   const char *second,
				   struct patch *patch)
{
	char *name;

	first += 4;	/* skip "--- " */
	second += 4;	/* skip "+++ " */
	/*
	 * If -p was not given, guess it from both name lines; accept
	 * the guess only when both lines agree (or only one yielded
	 * an answer).
	 */
	if (!state->p_value_known) {
		int p, q;
		p = guess_p_value(state, first);
		q = guess_p_value(state, second);
		if (p < 0)
			p = q;
		if (0 <= p && p == q) {
			state->p_value = p;
			state->p_value_known = 1;
		}
	}
	if (is_dev_null(first)) {
		/* "--- /dev/null" means a creation. */
		patch->is_new = 1;
		patch->is_delete = 0;
		name = find_name_traditional(&state->root, second, NULL,
					     state->p_value);
		patch->new_name = name;
	} else if (is_dev_null(second)) {
		/* "+++ /dev/null" means a deletion. */
		patch->is_new = 0;
		patch->is_delete = 1;
		name = find_name_traditional(&state->root, first, NULL,
					     state->p_value);
		patch->old_name = name;
	} else {
		char *first_name;
		/* Prefer the shorter/common name over a variant of it. */
		first_name = find_name_traditional(&state->root, first, NULL,
						   state->p_value);
		name = find_name_traditional(&state->root, second, first_name,
					     state->p_value);
		free(first_name);
		/*
		 * An epoch timestamp on either side is how GNU diff
		 * signals creation (on the old side) or deletion (on
		 * the new side).
		 */
		if (has_epoch_timestamp(first)) {
			patch->is_new = 1;
			patch->is_delete = 0;
			patch->new_name = name;
		} else if (has_epoch_timestamp(second)) {
			patch->is_new = 0;
			patch->is_delete = 1;
			patch->old_name = name;
		} else {
			patch->old_name = name;
			patch->new_name = xstrdup_or_null(name);
		}
	}
	if (!name)
		return error(_("unable to find filename in patch at line %d"),
			     state->linenr);

	return 0;
}

/* Header parsing is done once "@@ -" is seen; tell the caller to stop. */
static int gitdiff_hdrend(struct gitdiff_data *state UNUSED,
			  const char *line UNUSED,
			  struct patch *patch UNUSED)
{
	return 1;
}

/*
 * We're anal about diff header consistency, to make
 * sure that we don't end up having strange ambiguous
 * patches floating around.
 *
 * As a result, gitdiff_{old|new}name() will check
 * their names against any previous information, just
 * to make sure..
*/ #define DIFF_OLD_NAME 0 #define DIFF_NEW_NAME 1 static int gitdiff_verify_name(struct gitdiff_data *state, const char *line, int isnull, char **name, int side) { if (!*name && !isnull) { *name = find_name(state->root, line, NULL, state->p_value, TERM_TAB); return 0; } if (*name) { char *another; if (isnull) return error(_("git apply: bad git-diff - expected /dev/null, got %s on line %d"), *name, state->linenr); another = find_name(state->root, line, NULL, state->p_value, TERM_TAB); if (!another || strcmp(another, *name)) { free(another); return error((side == DIFF_NEW_NAME) ? _("git apply: bad git-diff - inconsistent new filename on line %d") : _("git apply: bad git-diff - inconsistent old filename on line %d"), state->linenr); } free(another); } else { if (!is_dev_null(line)) return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr); } return 0; } static int gitdiff_oldname(struct gitdiff_data *state, const char *line, struct patch *patch) { return gitdiff_verify_name(state, line, patch->is_new, &patch->old_name, DIFF_OLD_NAME); } static int gitdiff_newname(struct gitdiff_data *state, const char *line, struct patch *patch) { return gitdiff_verify_name(state, line, patch->is_delete, &patch->new_name, DIFF_NEW_NAME); } static int parse_mode_line(const char *line, int linenr, unsigned int *mode) { char *end; *mode = strtoul(line, &end, 8); if (end == line || !isspace(*end)) return error(_("invalid mode on line %d: %s"), linenr, line); *mode = canon_mode(*mode); return 0; } static int gitdiff_oldmode(struct gitdiff_data *state, const char *line, struct patch *patch) { return parse_mode_line(line, state->linenr, &patch->old_mode); } static int gitdiff_newmode(struct gitdiff_data *state, const char *line, struct patch *patch) { return parse_mode_line(line, state->linenr, &patch->new_mode); } static int gitdiff_delete(struct gitdiff_data *state, const char *line, struct patch *patch) { patch->is_delete = 1; free(patch->old_name); 
patch->old_name = xstrdup_or_null(patch->def_name); return gitdiff_oldmode(state, line, patch); } static int gitdiff_newfile(struct gitdiff_data *state, const char *line, struct patch *patch) { patch->is_new = 1; free(patch->new_name); patch->new_name = xstrdup_or_null(patch->def_name); return gitdiff_newmode(state, line, patch); } static int gitdiff_copysrc(struct gitdiff_data *state, const char *line, struct patch *patch) { patch->is_copy = 1; free(patch->old_name); patch->old_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); return 0; } static int gitdiff_copydst(struct gitdiff_data *state, const char *line, struct patch *patch) { patch->is_copy = 1; free(patch->new_name); patch->new_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); return 0; } static int gitdiff_renamesrc(struct gitdiff_data *state, const char *line, struct patch *patch) { patch->is_rename = 1; free(patch->old_name); patch->old_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); return 0; } static int gitdiff_renamedst(struct gitdiff_data *state, const char *line, struct patch *patch) { patch->is_rename = 1; free(patch->new_name); patch->new_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); return 0; } static int gitdiff_similarity(struct gitdiff_data *state UNUSED, const char *line, struct patch *patch) { unsigned long val = strtoul(line, NULL, 10); if (val <= 100) patch->score = val; return 0; } static int gitdiff_dissimilarity(struct gitdiff_data *state UNUSED, const char *line, struct patch *patch) { unsigned long val = strtoul(line, NULL, 10); if (val <= 100) patch->score = val; return 0; } static int gitdiff_index(struct gitdiff_data *state, const char *line, struct patch *patch) { /* * index line is N hexadecimal, "..", N hexadecimal, * and optional space with octal mode. 
*/ const char *ptr, *eol; int len; const unsigned hexsz = the_hash_algo->hexsz; ptr = strchr(line, '.'); if (!ptr || ptr[1] != '.' || hexsz < ptr - line) return 0; len = ptr - line; memcpy(patch->old_oid_prefix, line, len); patch->old_oid_prefix[len] = 0; line = ptr + 2; ptr = strchr(line, ' '); eol = strchrnul(line, '\n'); if (!ptr || eol < ptr) ptr = eol; len = ptr - line; if (hexsz < len) return 0; memcpy(patch->new_oid_prefix, line, len); patch->new_oid_prefix[len] = 0; if (*ptr == ' ') return gitdiff_oldmode(state, ptr + 1, patch); return 0; } /* * This is normal for a diff that doesn't change anything: we'll fall through * into the next diff. Tell the parser to break out. */ static int gitdiff_unrecognized(struct gitdiff_data *state UNUSED, const char *line UNUSED, struct patch *patch UNUSED) { return 1; } /* * Skip p_value leading components from "line"; as we do not accept * absolute paths, return NULL in that case. */ static const char *skip_tree_prefix(int p_value, const char *line, int llen) { int nslash; int i; if (!p_value) return (llen && line[0] == '/') ? NULL : line; nslash = p_value; for (i = 0; i < llen; i++) { int ch = line[i]; if (ch == '/' && --nslash <= 0) return (i == 0) ? NULL : &line[i + 1]; } return NULL; } /* * This is to extract the same name that appears on "diff --git" * line. We do not find and return anything if it is a rename * patch, and it is OK because we will find the name elsewhere. * We need to reliably find name only when it is mode-change only, * creation or deletion of an empty file. In any of these cases, * both sides are the same name under a/ and b/ respectively. 
*/ static char *git_header_name(int p_value, const char *line, int llen) { const char *name; const char *second = NULL; size_t len, line_len; line += strlen("diff --git "); llen -= strlen("diff --git "); if (*line == '"') { const char *cp; struct strbuf first = STRBUF_INIT; struct strbuf sp = STRBUF_INIT; if (unquote_c_style(&first, line, &second)) goto free_and_fail1; /* strip the a/b prefix including trailing slash */ cp = skip_tree_prefix(p_value, first.buf, first.len); if (!cp) goto free_and_fail1; strbuf_remove(&first, 0, cp - first.buf); /* * second points at one past closing dq of name. * find the second name. */ while ((second < line + llen) && isspace(*second)) second++; if (line + llen <= second) goto free_and_fail1; if (*second == '"') { if (unquote_c_style(&sp, second, NULL)) goto free_and_fail1; cp = skip_tree_prefix(p_value, sp.buf, sp.len); if (!cp) goto free_and_fail1; /* They must match, otherwise ignore */ if (strcmp(cp, first.buf)) goto free_and_fail1; strbuf_release(&sp); return strbuf_detach(&first, NULL); } /* unquoted second */ cp = skip_tree_prefix(p_value, second, line + llen - second); if (!cp) goto free_and_fail1; if (line + llen - cp != first.len || memcmp(first.buf, cp, first.len)) goto free_and_fail1; return strbuf_detach(&first, NULL); free_and_fail1: strbuf_release(&first); strbuf_release(&sp); return NULL; } /* unquoted first name */ name = skip_tree_prefix(p_value, line, llen); if (!name) return NULL; /* * since the first name is unquoted, a dq if exists must be * the beginning of the second name. 
*/ for (second = name; second < line + llen; second++) { if (*second == '"') { struct strbuf sp = STRBUF_INIT; const char *np; if (unquote_c_style(&sp, second, NULL)) goto free_and_fail2; np = skip_tree_prefix(p_value, sp.buf, sp.len); if (!np) goto free_and_fail2; len = sp.buf + sp.len - np; if (len < second - name && !strncmp(np, name, len) && isspace(name[len])) { /* Good */ strbuf_remove(&sp, 0, np - sp.buf); return strbuf_detach(&sp, NULL); } free_and_fail2: strbuf_release(&sp); return NULL; } } /* * Accept a name only if it shows up twice, exactly the same * form. */ second = strchr(name, '\n'); if (!second) return NULL; line_len = second - name; for (len = 0 ; ; len++) { switch (name[len]) { default: continue; case '\n': return NULL; case '\t': case ' ': /* * Is this the separator between the preimage * and the postimage pathname? Again, we are * only interested in the case where there is * no rename, as this is only to set def_name * and a rename patch has the names elsewhere * in an unambiguous form. */ if (!name[len + 1]) return NULL; /* no postimage name */ second = skip_tree_prefix(p_value, name + len + 1, line_len - (len + 1)); /* * If we are at the SP at the end of a directory, * skip_tree_prefix() may return NULL as that makes * it appears as if we have an absolute path. * Keep going to find another SP. */ if (!second) continue; /* * Does len bytes starting at "name" and "second" * (that are separated by one HT or SP we just * found) exactly match? 
*/ if (second[len] == '\n' && !strncmp(name, second, len)) return xmemdupz(name, len); } } } static int check_header_line(int linenr, struct patch *patch) { int extensions = (patch->is_delete == 1) + (patch->is_new == 1) + (patch->is_rename == 1) + (patch->is_copy == 1); if (extensions > 1) return error(_("inconsistent header lines %d and %d"), patch->extension_linenr, linenr); if (extensions && !patch->extension_linenr) patch->extension_linenr = linenr; return 0; } int parse_git_diff_header(struct strbuf *root, int *linenr, int p_value, const char *line, int len, unsigned int size, struct patch *patch) { unsigned long offset; struct gitdiff_data parse_hdr_state; /* A git diff has explicit new/delete information, so we don't guess */ patch->is_new = 0; patch->is_delete = 0; /* * Some things may not have the old name in the * rest of the headers anywhere (pure mode changes, * or removing or adding empty files), so we get * the default name from the header. */ patch->def_name = git_header_name(p_value, line, len); if (patch->def_name && root->len) { char *s = xstrfmt("%s%s", root->buf, patch->def_name); free(patch->def_name); patch->def_name = s; } line += len; size -= len; (*linenr)++; parse_hdr_state.root = root; parse_hdr_state.linenr = *linenr; parse_hdr_state.p_value = p_value; for (offset = len ; size > 0 ; offset += len, size -= len, line += len, (*linenr)++) { static const struct opentry { const char *str; int (*fn)(struct gitdiff_data *, const char *, struct patch *); } optable[] = { { "@@ -", gitdiff_hdrend }, { "--- ", gitdiff_oldname }, { "+++ ", gitdiff_newname }, { "old mode ", gitdiff_oldmode }, { "new mode ", gitdiff_newmode }, { "deleted file mode ", gitdiff_delete }, { "new file mode ", gitdiff_newfile }, { "copy from ", gitdiff_copysrc }, { "copy to ", gitdiff_copydst }, { "rename old ", gitdiff_renamesrc }, { "rename new ", gitdiff_renamedst }, { "rename from ", gitdiff_renamesrc }, { "rename to ", gitdiff_renamedst }, { "similarity index ", 
gitdiff_similarity }, { "dissimilarity index ", gitdiff_dissimilarity }, { "index ", gitdiff_index }, { "", gitdiff_unrecognized }, }; int i; len = linelen(line, size); if (!len || line[len-1] != '\n') break; for (i = 0; i < ARRAY_SIZE(optable); i++) { const struct opentry *p = optable + i; int oplen = strlen(p->str); int res; if (len < oplen || memcmp(p->str, line, oplen)) continue; res = p->fn(&parse_hdr_state, line + oplen, patch); if (res < 0) return -1; if (check_header_line(*linenr, patch)) return -1; if (res > 0) goto done; break; } } done: if (!patch->old_name && !patch->new_name) { if (!patch->def_name) { error(Q_("git diff header lacks filename information when removing " "%d leading pathname component (line %d)", "git diff header lacks filename information when removing " "%d leading pathname components (line %d)", parse_hdr_state.p_value), parse_hdr_state.p_value, *linenr); return -128; } patch->old_name = xstrdup(patch->def_name); patch->new_name = xstrdup(patch->def_name); } if ((!patch->new_name && !patch->is_delete) || (!patch->old_name && !patch->is_new)) { error(_("git diff header lacks filename information " "(line %d)"), *linenr); return -128; } patch->is_toplevel_relative = 1; return offset; } static int parse_num(const char *line, unsigned long *p) { char *ptr; if (!isdigit(*line)) return 0; *p = strtoul(line, &ptr, 10); return ptr - line; } static int parse_range(const char *line, int len, int offset, const char *expect, unsigned long *p1, unsigned long *p2) { int digits, ex; if (offset < 0 || offset >= len) return -1; line += offset; len -= offset; digits = parse_num(line, p1); if (!digits) return -1; offset += digits; line += digits; len -= digits; *p2 = 1; if (*line == ',') { digits = parse_num(line+1, p2); if (!digits) return -1; offset += digits+1; line += digits+1; len -= digits+1; } ex = strlen(expect); if (ex > len) return -1; if (memcmp(line, expect, ex)) return -1; return offset + ex; } static void recount_diff(const char *line, int 
size, struct fragment *fragment) { int oldlines = 0, newlines = 0, ret = 0; if (size < 1) { warning("recount: ignore empty hunk"); return; } for (;;) { int len = linelen(line, size); size -= len; line += len; if (size < 1) break; switch (*line) { case ' ': case '\n': newlines++; /* fall through */ case '-': oldlines++; continue; case '+': newlines++; continue; case '\\': continue; case '@': ret = size < 3 || !starts_with(line, "@@ "); break; case 'd': ret = size < 5 || !starts_with(line, "diff "); break; default: ret = -1; break; } if (ret) { warning(_("recount: unexpected line: %.*s"), (int)linelen(line, size), line); return; } break; } fragment->oldlines = oldlines; fragment->newlines = newlines; } /* * Parse a unified diff fragment header of the * form "@@ -a,b +c,d @@" */ static int parse_fragment_header(const char *line, int len, struct fragment *fragment) { int offset; if (!len || line[len-1] != '\n') return -1; /* Figure out the number of lines in a fragment */ offset = parse_range(line, len, 4, " +", &fragment->oldpos, &fragment->oldlines); offset = parse_range(line, len, offset, " @@", &fragment->newpos, &fragment->newlines); return offset; } /* * Find file diff header * * Returns: * -1 if no header was found * -128 in case of error * the size of the header in bytes (called "offset") otherwise */ static int find_header(struct apply_state *state, const char *line, unsigned long size, int *hdrsize, struct patch *patch) { unsigned long offset, len; patch->is_toplevel_relative = 0; patch->is_rename = patch->is_copy = 0; patch->is_new = patch->is_delete = -1; patch->old_mode = patch->new_mode = 0; patch->old_name = patch->new_name = NULL; for (offset = 0; size > 0; offset += len, size -= len, line += len, state->linenr++) { unsigned long nextlen; len = linelen(line, size); if (!len) break; /* Testing this early allows us to take a few shortcuts.. */ if (len < 6) continue; /* * Make sure we don't find any unconnected patch fragments. 
* That's a sign that we didn't find a header, and that a * patch has become corrupted/broken up. */ if (!memcmp("@@ -", line, 4)) { struct fragment dummy; if (parse_fragment_header(line, len, &dummy) < 0) continue; error(_("patch fragment without header at line %d: %.*s"), state->linenr, (int)len-1, line); return -128; } if (size < len + 6) break; /* * Git patch? It might not have a real patch, just a rename * or mode change, so we handle that specially */ if (!memcmp("diff --git ", line, 11)) { int git_hdr_len = parse_git_diff_header(&state->root, &state->linenr, state->p_value, line, len, size, patch); if (git_hdr_len < 0) return -128; if (git_hdr_len <= len) continue; *hdrsize = git_hdr_len; return offset; } /* --- followed by +++ ? */ if (memcmp("--- ", line, 4) || memcmp("+++ ", line + len, 4)) continue; /* * We only accept unified patches, so we want it to * at least have "@@ -a,b +c,d @@\n", which is 14 chars * minimum ("@@ -0,0 +1 @@\n" is the shortest). */ nextlen = linelen(line + len, size - len); if (size < nextlen + 14 || memcmp("@@ -", line + len + nextlen, 4)) continue; /* Ok, we'll consider it a patch */ if (parse_traditional_patch(state, line, line+len, patch)) return -128; *hdrsize = len + nextlen; state->linenr += 2; return offset; } return -1; } static void record_ws_error(struct apply_state *state, unsigned result, const char *line, int len, int linenr) { char *err; if (!result) return; state->whitespace_error++; if (state->squelch_whitespace_errors && state->squelch_whitespace_errors < state->whitespace_error) return; err = whitespace_error_string(result); if (state->apply_verbosity > verbosity_silent) fprintf(stderr, "%s:%d: %s.\n%.*s\n", state->patch_input_file, linenr, err, len, line); free(err); } static void check_whitespace(struct apply_state *state, const char *line, int len, unsigned ws_rule) { unsigned result = ws_check(line + 1, len - 1, ws_rule); record_ws_error(state, result, line + 1, len - 2, state->linenr); } /* * Check if the 
patch has context lines with CRLF or
 * the patch wants to remove lines with CRLF.
 */
static void check_old_for_crlf(struct patch *patch, const char *line, int len)
{
	if (len >= 2 && line[len-1] == '\n' && line[len-2] == '\r') {
		patch->ws_rule |= WS_CR_AT_EOL;
		patch->crlf_in_old = 1;
	}
}

/*
 * Parse a unified diff. Note that this really needs to parse each
 * fragment separately, since the only way to know the difference
 * between a "---" that is part of a patch, and a "---" that starts
 * the next patch is to look at the line counts..
 *
 * Returns the number of bytes consumed from "line", or -1 on a
 * malformed hunk.
 */
static int parse_fragment(struct apply_state *state,
			  const char *line,
			  unsigned long size,
			  struct patch *patch,
			  struct fragment *fragment)
{
	int added, deleted;
	int len = linelen(line, size), offset;
	unsigned long oldlines, newlines;
	unsigned long leading, trailing;

	offset = parse_fragment_header(line, len, fragment);
	if (offset < 0)
		return -1;
	if (offset > 0 && patch->recount)
		recount_diff(line + offset, size - offset, fragment);
	oldlines = fragment->oldlines;
	newlines = fragment->newlines;
	leading = 0;
	trailing = 0;

	/* Parse the thing.. */
	line += len;
	size -= len;
	state->linenr++;
	added = deleted = 0;
	for (offset = len;
	     0 < size;
	     offset += len, size -= len, line += len, state->linenr++) {
		if (!oldlines && !newlines)
			break;
		len = linelen(line, size);
		if (!len || line[len-1] != '\n')
			return -1;
		switch (*line) {
		default:
			return -1;
		case '\n': /* newer GNU diff, an empty context line */
		case ' ':
			oldlines--;
			newlines--;
			if (!deleted && !added)
				leading++;
			trailing++;
			check_old_for_crlf(patch, line, len);
			if (!state->apply_in_reverse &&
			    state->ws_error_action == correct_ws_error)
				check_whitespace(state, line, len, patch->ws_rule);
			break;
		case '-':
			if (!state->apply_in_reverse)
				check_old_for_crlf(patch, line, len);
			if (state->apply_in_reverse &&
			    state->ws_error_action != nowarn_ws_error)
				check_whitespace(state, line, len, patch->ws_rule);
			deleted++;
			oldlines--;
			trailing = 0;
			break;
		case '+':
			if (state->apply_in_reverse)
				check_old_for_crlf(patch, line, len);
			if (!state->apply_in_reverse &&
			    state->ws_error_action != nowarn_ws_error)
				check_whitespace(state, line, len, patch->ws_rule);
			added++;
			newlines--;
			trailing = 0;
			break;

		/*
		 * We allow "\ No newline at end of file". Depending
		 * on locale settings when the patch was produced we
		 * don't know what this line looks like. The only
		 * thing we do know is that it begins with "\ ".
		 * Checking for 12 is just for sanity check -- any
		 * l10n of "\ No newline..." is at least that long.
		 */
		case '\\':
			if (len < 12 || memcmp(line, "\\ ", 2))
				return -1;
			break;
		}
	}
	if (oldlines || newlines)
		return -1;
	if (!patch->recount && !deleted && !added)
		return -1;

	fragment->leading = leading;
	fragment->trailing = trailing;

	/*
	 * If a fragment ends with an incomplete line, we failed to include
	 * it in the above loop because we hit oldlines == newlines == 0
	 * before seeing it.
	 */
	if (12 < size && !memcmp(line, "\\ ", 2))
		offset += linelen(line, size);

	patch->lines_added += added;
	patch->lines_deleted += deleted;

	if (0 < patch->is_new && oldlines)
		return error(_("new file depends on old contents"));
	if (0 < patch->is_delete && newlines)
		return error(_("deleted file still has contents"));
	return offset;
}

/*
 * We have seen "diff --git a/... b/..." header (or a traditional patch
 * header). Read hunks that belong to this patch into fragments and hang
 * them to the given patch structure.
 *
 * The (fragment->patch, fragment->size) pair points into the memory given
 * by the caller, not a copy, when we return.
 *
 * Returns:
 *   -1 in case of error,
 *   the number of bytes in the patch otherwise.
 */
static int parse_single_patch(struct apply_state *state,
			      const char *line,
			      unsigned long size,
			      struct patch *patch)
{
	unsigned long offset = 0;
	unsigned long oldlines = 0, newlines = 0, context = 0;
	struct fragment **fragp = &patch->fragments;

	while (size > 4 && !memcmp(line, "@@ -", 4)) {
		struct fragment *fragment;
		int len;

		CALLOC_ARRAY(fragment, 1);
		fragment->linenr = state->linenr;
		len = parse_fragment(state, line, size, patch, fragment);
		if (len <= 0) {
			free(fragment);
			return error(_("corrupt patch at line %d"), state->linenr);
		}
		fragment->patch = line;
		fragment->size = len;
		oldlines += fragment->oldlines;
		newlines += fragment->newlines;
		context += fragment->leading + fragment->trailing;

		/* append to the patch's fragment list */
		*fragp = fragment;
		fragp = &fragment->next;

		offset += len;
		line += len;
		size -= len;
	}

	/*
	 * If something was removed (i.e. we have old-lines) it cannot
	 * be creation, and if something was added it cannot be
	 * deletion. However, the reverse is not true; --unified=0
	 * patches that only add are not necessarily creation even
	 * though they do not have any old lines, and ones that only
	 * delete are not necessarily deletion.
*
 * Unfortunately, a real creation/deletion patch do _not_ have
 * any context line by definition, so we cannot safely tell it
 * apart with --unified=0 insanity. At least if the patch has
 * more than one hunk it is not creation or deletion.
 */
	if (patch->is_new < 0 &&
	    (oldlines || (patch->fragments && patch->fragments->next)))
		patch->is_new = 0;
	if (patch->is_delete < 0 &&
	    (newlines || (patch->fragments && patch->fragments->next)))
		patch->is_delete = 0;

	if (0 < patch->is_new && oldlines)
		return error(_("new file %s depends on old contents"), patch->new_name);
	if (0 < patch->is_delete && newlines)
		return error(_("deleted file %s still has contents"), patch->old_name);
	if (!patch->is_delete && !newlines && context && state->apply_verbosity > verbosity_silent)
		fprintf_ln(stderr,
			   _("** warning: "
			     "file %s becomes empty but is not deleted"),
			   patch->new_name);

	return offset;
}

/*
 * Does this patch carry a metadata-only change: rename, copy,
 * creation, deletion, or a file-mode flip?
 */
static inline int metadata_changes(struct patch *patch)
{
	return	patch->is_rename > 0 ||
		patch->is_copy > 0 ||
		patch->is_new > 0 ||
		patch->is_delete ||
		(patch->old_mode && patch->new_mode &&
		 patch->old_mode != patch->new_mode);
}

/*
 * Inflate "size" bytes of deflated data into a newly allocated buffer
 * that must decode to exactly "inflated_size" bytes; returns NULL (and
 * frees the buffer) on any error or size mismatch.
 */
static char *inflate_it(const void *data, unsigned long size,
			unsigned long inflated_size)
{
	git_zstream stream;
	void *out;
	int st;

	memset(&stream, 0, sizeof(stream));

	stream.next_in = (unsigned char *)data;
	stream.avail_in = size;
	stream.next_out = out = xmalloc(inflated_size);
	stream.avail_out = inflated_size;
	git_inflate_init(&stream);
	st = git_inflate(&stream, Z_FINISH);
	git_inflate_end(&stream);
	if ((st != Z_STREAM_END) || stream.total_out != inflated_size) {
		free(out);
		return NULL;
	}
	return out;
}

/*
 * Read a binary hunk and return a new fragment; fragment->patch
 * points at an allocated memory that the caller must free, so
 * it is marked as "->free_patch = 1".
*/
static struct fragment *parse_binary_hunk(struct apply_state *state,
					  char **buf_p,
					  unsigned long *sz_p,
					  int *status_p,
					  int *used_p)
{
	/*
	 * Expect a line that begins with binary patch method ("literal"
	 * or "delta"), followed by the length of data before deflating.
	 * a sequence of 'length-byte' followed by base-85 encoded data
	 * should follow, terminated by a newline.
	 *
	 * Each 5-byte sequence of base-85 encodes up to 4 bytes,
	 * and we would limit the patch line to 66 characters,
	 * so one line can fit up to 13 groups that would decode
	 * to 52 bytes max. The length byte 'A'-'Z' corresponds
	 * to 1-26 bytes, and 'a'-'z' corresponds to 27-52 bytes.
	 */
	int llen, used;
	unsigned long size = *sz_p;
	char *buffer = *buf_p;
	int patch_method;
	unsigned long origlen;
	char *data = NULL;
	int hunk_size = 0;
	struct fragment *frag;

	llen = linelen(buffer, size);
	used = llen;

	*status_p = 0;

	if (starts_with(buffer, "delta ")) {
		patch_method = BINARY_DELTA_DEFLATED;
		origlen = strtoul(buffer + 6, NULL, 10);
	}
	else if (starts_with(buffer, "literal ")) {
		patch_method = BINARY_LITERAL_DEFLATED;
		origlen = strtoul(buffer + 8, NULL, 10);
	}
	else
		return NULL;

	state->linenr++;
	buffer += llen;
	size -= llen;
	/* Decode base-85 data lines until the terminating blank line. */
	while (1) {
		int byte_length, max_byte_length, newsize;
		llen = linelen(buffer, size);
		used += llen;
		state->linenr++;
		if (llen == 1) {
			/* consume the blank line */
			buffer++;
			size--;
			break;
		}
		/*
		 * Minimum line is "A00000\n" which is 7-byte long,
		 * and the line length must be multiple of 5 plus 2.
		 */
		if ((llen < 7) || (llen-2) % 5)
			goto corrupt;
		max_byte_length = (llen - 2) / 5 * 4;
		byte_length = *buffer;
		if ('A' <= byte_length && byte_length <= 'Z')
			byte_length = byte_length - 'A' + 1;
		else if ('a' <= byte_length && byte_length <= 'z')
			byte_length = byte_length - 'a' + 27;
		else
			goto corrupt;
		/* if the input length was not multiple of 4, we would
		 * have filler at the end but the filler should never
		 * exceed 3 bytes
		 */
		if (max_byte_length < byte_length ||
		    byte_length <= max_byte_length - 4)
			goto corrupt;
		newsize = hunk_size + byte_length;
		data = xrealloc(data, newsize);
		if (decode_85(data + hunk_size, buffer + 1, byte_length))
			goto corrupt;
		hunk_size = newsize;
		buffer += llen;
		size -= llen;
	}

	CALLOC_ARRAY(frag, 1);
	frag->patch = inflate_it(data, hunk_size, origlen);
	frag->free_patch = 1;
	/*
	 * NOTE(review): on this failure path "frag" itself appears to be
	 * leaked (only "data" is freed below) — matches upstream; confirm.
	 */
	if (!frag->patch)
		goto corrupt;
	free(data);
	frag->size = origlen;
	*buf_p = buffer;
	*sz_p = size;
	*used_p = used;
	frag->binary_patch_method = patch_method;
	return frag;

 corrupt:
	free(data);
	*status_p = -1;
	error(_("corrupt binary patch at line %d: %.*s"),
	      state->linenr-1, llen-1, buffer);
	return NULL;
}

/*
 * Returns:
 *   -1 in case of error,
 *   the length of the parsed binary patch otherwise
 */
static int parse_binary(struct apply_state *state,
			char *buffer,
			unsigned long size,
			struct patch *patch)
{
	/*
	 * We have read "GIT binary patch\n"; what follows is a line
	 * that says the patch method (currently, either "literal" or
	 * "delta") and the length of data before deflating; a
	 * sequence of 'length-byte' followed by base-85 encoded data
	 * follows.
	 *
	 * When a binary patch is reversible, there is another binary
	 * hunk in the same format, starting with patch method (either
	 * "literal" or "delta") with the length of data, and a sequence
	 * of length-byte + base-85 encoded data, terminated with another
	 * empty line. This data, when applied to the postimage, produces
	 * the preimage.
*/
	struct fragment *forward;
	struct fragment *reverse;
	int status;
	int used, used_1;

	forward = parse_binary_hunk(state, &buffer, &size, &status, &used);
	if (!forward && !status)
		/* there has to be one hunk (forward hunk) */
		return error(_("unrecognized binary patch at line %d"), state->linenr-1);
	if (status)
		/* otherwise we already gave an error message */
		return status;

	reverse = parse_binary_hunk(state, &buffer, &size, &status, &used_1);
	if (reverse)
		used += used_1;
	else if (status) {
		/*
		 * Not having reverse hunk is not an error, but having
		 * a corrupt reverse hunk is.
		 */
		free((void*) forward->patch);
		free(forward);
		return status;
	}
	forward->next = reverse;
	patch->fragments = forward;
	patch->is_binary = 1;
	return used;
}

/*
 * Replace *name with a copy prefixed by state->prefix, freeing the
 * old string.
 */
static void prefix_one(struct apply_state *state, char **name)
{
	char *old_name = *name;
	if (!old_name)
		return;
	*name = prefix_filename(state->prefix, *name);
	free(old_name);
}

static void prefix_patch(struct apply_state *state, struct patch *p)
{
	if (!state->prefix || p->is_toplevel_relative)
		return;
	prefix_one(state, &p->new_name);
	prefix_one(state, &p->old_name);
}

/*
 * include/exclude
 */

static void add_name_limit(struct apply_state *state,
			   const char *name,
			   int exclude)
{
	struct string_list_item *it;

	it = string_list_append(&state->limit_by_name, name);
	/* non-NULL util marks an "--include" rule, NULL an "--exclude" */
	it->util = exclude ? NULL : (void *) 1;
}

static int use_patch(struct apply_state *state, struct patch *p)
{
	const char *pathname = p->new_name ? p->new_name : p->old_name;
	int i;

	/* Paths outside are not touched regardless of "--include" */
	if (state->prefix && *state->prefix) {
		const char *rest;
		if (!skip_prefix(pathname, state->prefix, &rest) || !*rest)
			return 0;
	}

	/* See if it matches any of exclude/include rule */
	for (i = 0; i < state->limit_by_name.nr; i++) {
		struct string_list_item *it = &state->limit_by_name.items[i];
		if (!wildmatch(it->string, pathname, 0))
			return (it->util != NULL);
	}

	/*
	 * If we had any include, a path that does not match any rule is
	 * not used. Otherwise, we saw bunch of exclude rules (or none)
	 * and such a path is used.
	 */
	return !state->has_include;
}

/*
 * Read the patch text in "buffer" that extends for "size" bytes; stop
 * reading after seeing a single patch (i.e. changes to a single file).
 * Create fragments (i.e. patch hunks) and hang them to the given patch.
 *
 * Returns:
 *   -1 if no header was found or parse_binary() failed,
 *   -128 on another error,
 *   the number of bytes consumed otherwise,
 *     so that the caller can call us again for the next patch.
 */
static int parse_chunk(struct apply_state *state, char *buffer, unsigned long size, struct patch *patch)
{
	int hdrsize, patchsize;
	int offset = find_header(state, buffer, size, &hdrsize, patch);

	if (offset < 0)
		return offset;

	prefix_patch(state, patch);

	if (!use_patch(state, patch))
		patch->ws_rule = 0;
	else if (patch->new_name)
		patch->ws_rule = whitespace_rule(state->repo->index,
						 patch->new_name);
	else
		patch->ws_rule = whitespace_rule(state->repo->index,
						 patch->old_name);

	patchsize = parse_single_patch(state,
				       buffer + offset + hdrsize,
				       size - offset - hdrsize,
				       patch);

	if (patchsize < 0)
		return -128;

	if (!patchsize) {
		static const char git_binary[] = "GIT binary patch\n";
		int hd = hdrsize + offset;
		unsigned long llen = linelen(buffer + hd, size - hd);

		if (llen == sizeof(git_binary) - 1 &&
		    !memcmp(git_binary, buffer + hd, llen)) {
			int used;
			state->linenr++;
			used = parse_binary(state, buffer + hd + llen,
					    size - hd - llen, patch);
			if (used < 0)
				return -1;
			if (used)
				patchsize = used + llen;
			else
				patchsize = 0;
		}
		else if (!memcmp(" differ\n", buffer + hd + llen - 8, 8)) {
			/* "Binary files a and b differ" style note */
			static const char *binhdr[] = {
				"Binary files ",
				"Files ",
				NULL,
			};
			int i;
			for (i = 0; binhdr[i]; i++) {
				int len = strlen(binhdr[i]);
				if (len < size - hd &&
				    !memcmp(binhdr[i], buffer + hd, len)) {
					state->linenr++;
					patch->is_binary = 1;
					patchsize = llen;
					break;
				}
			}
		}

		/* Empty patch cannot be applied if it is a text patch
		 * without metadata change. A binary patch appears
		 * empty to us here.
*/
		if ((state->apply || state->check) &&
		    (!patch->is_binary && !metadata_changes(patch))) {
			error(_("patch with only garbage at line %d"), state->linenr);
			return -128;
		}
	}

	return offset + hdrsize + patchsize;
}

/*
 * Swap old/new roles in every patch and fragment so that the patch
 * series applies in reverse.
 */
static void reverse_patches(struct patch *p)
{
	for (; p; p = p->next) {
		struct fragment *frag = p->fragments;

		SWAP(p->new_name, p->old_name);
		if (p->new_mode)
			SWAP(p->new_mode, p->old_mode);
		SWAP(p->is_new, p->is_delete);
		SWAP(p->lines_added, p->lines_deleted);
		SWAP(p->old_oid_prefix, p->new_oid_prefix);

		for (; frag; frag = frag->next) {
			SWAP(frag->newpos, frag->oldpos);
			SWAP(frag->newlines, frag->oldlines);
		}
	}
}

static const char pluses[] =
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++";
static const char minuses[]=
"----------------------------------------------------------------------";

/* Print one diffstat line ("name | NN +++---") for the given patch. */
static void show_stats(struct apply_state *state, struct patch *patch)
{
	struct strbuf qname = STRBUF_INIT;
	char *cp = patch->new_name ? patch->new_name : patch->old_name;
	int max, add, del;

	quote_c_style(cp, &qname, NULL, 0);

	/*
	 * "scale" the filename
	 */
	max = state->max_len;
	if (max > 50)
		max = 50;

	if (qname.len > max) {
		cp = strchr(qname.buf + qname.len + 3 - max, '/');
		if (!cp)
			cp = qname.buf + qname.len + 3 - max;
		strbuf_splice(&qname, 0, cp - qname.buf, "...", 3);
	}

	if (patch->is_binary) {
		printf(" %-*s | Bin\n", max, qname.buf);
		strbuf_release(&qname);
		return;
	}

	printf(" %-*s |", max, qname.buf);
	strbuf_release(&qname);

	/*
	 * scale the add/delete
	 */
	max = max + state->max_change > 70 ?
	      70 - max : state->max_change;
	add = patch->lines_added;
	del = patch->lines_deleted;

	if (state->max_change > 0) {
		int total = ((add + del) * max + state->max_change / 2) / state->max_change;
		add = (add * max + state->max_change / 2) / state->max_change;
		del = total - add;
	}
	printf("%5d %.*s%.*s\n", patch->lines_added + patch->lines_deleted,
	       add, pluses, del, minuses);
}

static int read_old_data(struct stat *st, struct patch *patch,
			 const char *path, struct strbuf *buf)
{
	int conv_flags = patch->crlf_in_old ?
		CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE;
	switch (st->st_mode & S_IFMT) {
	case S_IFLNK:
		if (strbuf_readlink(buf, path, st->st_size) < 0)
			return error(_("unable to read symlink %s"), path);
		return 0;
	case S_IFREG:
		if (strbuf_read_file(buf, path, st->st_size) != st->st_size)
			return error(_("unable to open or read %s"), path);
		/*
		 * "git apply" without "--index/--cached" should never look
		 * at the index; the target file may not have been added to
		 * the index yet, and we may not even be in any Git repository.
		 * Pass NULL to convert_to_git() to stress this; the function
		 * should never look at the index when explicit crlf option
		 * is given.
		 */
		convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags);
		return 0;
	default:
		return -1;
	}
}

/*
 * Update the preimage, and the common lines in postimage,
 * from buffer buf of length len.
 */
static void update_pre_post_images(struct image *preimage,
				   struct image *postimage,
				   char *buf, size_t len)
{
	struct image fixed_preimage = IMAGE_INIT;
	size_t insert_pos = 0;
	int i, ctx, reduced;
	const char *fixed;

	/*
	 * Update the preimage with whitespace fixes. Note that we
	 * are not losing preimage->buf -- apply_one_fragment() will
	 * free "oldlines".
	 */
	image_prepare(&fixed_preimage, buf, len, 1);
	for (i = 0; i < fixed_preimage.line_nr; i++)
		fixed_preimage.line[i].flag = preimage->line[i].flag;
	image_clear(preimage);
	*preimage = fixed_preimage;
	fixed = preimage->buf.buf;

	/*
	 * Adjust the common context lines in postimage.
*/
	for (i = reduced = ctx = 0; i < postimage->line_nr; i++) {
		size_t l_len = postimage->line[i].len;

		if (!(postimage->line[i].flag & LINE_COMMON)) {
			/* an added line -- no counterparts in preimage */
			insert_pos += l_len;
			continue;
		}

		/* and find the corresponding one in the fixed preimage */
		while (ctx < preimage->line_nr &&
		       !(preimage->line[ctx].flag & LINE_COMMON)) {
			fixed += preimage->line[ctx].len;
			ctx++;
		}

		/*
		 * preimage is expected to run out, if the caller
		 * fixed addition of trailing blank lines.
		 */
		if (preimage->line_nr <= ctx) {
			reduced++;
			continue;
		}

		/* and copy it in, while fixing the line length */
		l_len = preimage->line[ctx].len;
		strbuf_splice(&postimage->buf, insert_pos, postimage->line[i].len,
			      fixed, l_len);
		insert_pos += l_len;
		fixed += l_len;
		postimage->line[i].len = l_len;
		ctx++;
	}

	/* Fix the length of the whole thing */
	postimage->line_nr -= reduced;
}

/*
 * Compare lines s1 of length n1 and s2 of length n2, ignoring
 * whitespace difference. Returns 1 if they match, 0 otherwise
 */
static int fuzzy_matchlines(const char *s1, size_t n1,
			    const char *s2, size_t n2)
{
	const char *end1 = s1 + n1;
	const char *end2 = s2 + n2;

	/* ignore line endings */
	while (s1 < end1 && (end1[-1] == '\r' || end1[-1] == '\n'))
		end1--;
	while (s2 < end2 && (end2[-1] == '\r' || end2[-1] == '\n'))
		end2--;

	while (s1 < end1 && s2 < end2) {
		if (isspace(*s1)) {
			/*
			 * Skip whitespace. We check on both buffers
			 * because we don't want "a b" to match "ab".
			 */
			if (!isspace(*s2))
				return 0;
			while (s1 < end1 && isspace(*s1))
				s1++;
			while (s2 < end2 && isspace(*s2))
				s2++;
		} else if (*s1++ != *s2++)
			return 0;
	}

	/* If we reached the end on one side only, lines don't match. */
	return s1 == end1 && s2 == end2;
}

/*
 * Whitespace-fuzzy comparison of the preimage against img at the given
 * position; on a match, rewrites the pre/post images to use the
 * target's whitespace. Returns 1 on match, 0 otherwise.
 */
static int line_by_line_fuzzy_match(struct image *img,
				    struct image *preimage,
				    struct image *postimage,
				    unsigned long current,
				    int current_lno,
				    int preimage_limit)
{
	int i;
	size_t imgoff = 0;
	size_t preoff = 0;
	size_t extra_chars;
	char *buf;
	char *preimage_eof;
	char *preimage_end;
	struct strbuf fixed;
	char *fixed_buf;
	size_t fixed_len;

	for (i = 0; i < preimage_limit; i++) {
		size_t prelen = preimage->line[i].len;
		size_t imglen = img->line[current_lno+i].len;

		if (!fuzzy_matchlines(img->buf.buf + current + imgoff, imglen,
				      preimage->buf.buf + preoff, prelen))
			return 0;
		imgoff += imglen;
		preoff += prelen;
	}

	/*
	 * Ok, the preimage matches with whitespace fuzz.
	 *
	 * imgoff now holds the true length of the target that
	 * matches the preimage before the end of the file.
	 *
	 * Count the number of characters in the preimage that fall
	 * beyond the end of the file and make sure that all of them
	 * are whitespace characters. (This can only happen if
	 * we are removing blank lines at the end of the file.)
	 */
	buf = preimage_eof = preimage->buf.buf + preoff;
	for ( ; i < preimage->line_nr; i++)
		preoff += preimage->line[i].len;
	preimage_end = preimage->buf.buf + preoff;
	for ( ; buf < preimage_end; buf++)
		if (!isspace(*buf))
			return 0;

	/*
	 * Update the preimage and the common postimage context
	 * lines to use the same whitespace as the target.
	 * If whitespace is missing in the target (i.e.
	 * if the preimage extends beyond the end of the file),
	 * use the whitespace from the preimage.
	 */
	extra_chars = preimage_end - preimage_eof;
	strbuf_init(&fixed, imgoff + extra_chars);
	strbuf_add(&fixed, img->buf.buf + current, imgoff);
	strbuf_add(&fixed, preimage_eof, extra_chars);
	fixed_buf = strbuf_detach(&fixed, &fixed_len);
	update_pre_post_images(preimage, postimage,
			       fixed_buf, fixed_len);
	return 1;
}

static int match_fragment(struct apply_state *state,
			  struct image *img,
			  struct image *preimage,
			  struct image *postimage,
			  unsigned long current,
			  int current_lno,
			  unsigned ws_rule,
			  int match_beginning, int match_end)
{
	int i;
	const char *orig, *target;
	struct strbuf fixed = STRBUF_INIT;
	char *fixed_buf;
	size_t fixed_len;
	int preimage_limit;
	int ret;

	if (preimage->line_nr + current_lno <= img->line_nr) {
		/*
		 * The hunk falls within the boundaries of img.
		 */
		preimage_limit = preimage->line_nr;
		if (match_end && (preimage->line_nr + current_lno != img->line_nr)) {
			ret = 0;
			goto out;
		}
	} else if (state->ws_error_action == correct_ws_error &&
		   (ws_rule & WS_BLANK_AT_EOF)) {
		/*
		 * This hunk extends beyond the end of img, and we are
		 * removing blank lines at the end of the file. This
		 * many lines from the beginning of the preimage must
		 * match with img, and the remainder of the preimage
		 * must be blank.
		 */
		preimage_limit = img->line_nr - current_lno;
	} else {
		/*
		 * The hunk extends beyond the end of the img and
		 * we are not removing blanks at the end, so we
		 * should reject the hunk at this position.
		 */
		ret = 0;
		goto out;
	}

	if (match_beginning && current_lno) {
		ret = 0;
		goto out;
	}

	/* Quick hash check */
	for (i = 0; i < preimage_limit; i++) {
		if ((img->line[current_lno + i].flag & LINE_PATCHED) ||
		    (preimage->line[i].hash != img->line[current_lno + i].hash)) {
			ret = 0;
			goto out;
		}
	}

	if (preimage_limit == preimage->line_nr) {
		/*
		 * Do we have an exact match? If we were told to match
		 * at the end, size must be exactly at current+fragsize,
		 * otherwise current+fragsize must be still within the preimage,
		 * and either case, the old piece should match the preimage
		 * exactly.
*/
		if ((match_end
		     ? (current + preimage->buf.len == img->buf.len)
		     : (current + preimage->buf.len <= img->buf.len)) &&
		    !memcmp(img->buf.buf + current, preimage->buf.buf,
			    preimage->buf.len)) {
			ret = 1;
			goto out;
		}
	} else {
		/*
		 * The preimage extends beyond the end of img, so
		 * there cannot be an exact match.
		 *
		 * There must be one non-blank context line that match
		 * a line before the end of img.
		 */
		const char *buf, *buf_end;

		buf = preimage->buf.buf;
		buf_end = buf;
		for (i = 0; i < preimage_limit; i++)
			buf_end += preimage->line[i].len;

		for ( ; buf < buf_end; buf++)
			if (!isspace(*buf))
				break;
		if (buf == buf_end) {
			ret = 0;
			goto out;
		}
	}

	/*
	 * No exact match. If we are ignoring whitespace, run a line-by-line
	 * fuzzy matching. We collect all the line length information because
	 * we need it to adjust whitespace if we match.
	 */
	if (state->ws_ignore_action == ignore_ws_change) {
		ret = line_by_line_fuzzy_match(img, preimage, postimage,
					       current, current_lno, preimage_limit);
		goto out;
	}

	if (state->ws_error_action != correct_ws_error) {
		ret = 0;
		goto out;
	}

	/*
	 * The hunk does not apply byte-by-byte, but the hash says
	 * it might with whitespace fuzz. We weren't asked to
	 * ignore whitespace, we were asked to correct whitespace
	 * errors, so let's try matching after whitespace correction.
	 *
	 * While checking the preimage against the target, whitespace
	 * errors in both fixed, we count how large the corresponding
	 * postimage needs to be. The postimage prepared by
	 * apply_one_fragment() has whitespace errors fixed on added
	 * lines already, but the common lines were propagated as-is,
	 * which may become longer when their whitespace errors are
	 * fixed.
	 */

	/*
	 * The preimage may extend beyond the end of the file,
	 * but in this loop we will only handle the part of the
	 * preimage that falls within the file.
	 */
	strbuf_grow(&fixed, preimage->buf.len + 1);
	orig = preimage->buf.buf;
	target = img->buf.buf + current;
	for (i = 0; i < preimage_limit; i++) {
		size_t oldlen = preimage->line[i].len;
		size_t tgtlen = img->line[current_lno + i].len;
		size_t fixstart = fixed.len;
		struct strbuf tgtfix;
		int match;

		/* Try fixing the line in the preimage */
		ws_fix_copy(&fixed, orig, oldlen, ws_rule, NULL);

		/* Try fixing the line in the target */
		strbuf_init(&tgtfix, tgtlen);
		ws_fix_copy(&tgtfix, target, tgtlen, ws_rule, NULL);

		/*
		 * If they match, either the preimage was based on
		 * a version before our tree fixed whitespace breakage,
		 * or we are lacking a whitespace-fix patch the tree
		 * the preimage was based on already had (i.e. target
		 * has whitespace breakage, the preimage doesn't).
		 * In either case, we are fixing the whitespace breakages
		 * so we might as well take the fix together with their
		 * real change.
		 */
		match = (tgtfix.len == fixed.len - fixstart &&
			 !memcmp(tgtfix.buf, fixed.buf + fixstart,
				 fixed.len - fixstart));

		strbuf_release(&tgtfix);
		if (!match) {
			ret = 0;
			goto out;
		}

		orig += oldlen;
		target += tgtlen;
	}

	/*
	 * Now handle the lines in the preimage that falls beyond the
	 * end of the file (if any). They will only match if they are
	 * empty or only contain whitespace (if WS_BLANK_AT_EOL is
	 * false).
	 */
	for ( ; i < preimage->line_nr; i++) {
		size_t fixstart = fixed.len; /* start of the fixed preimage */
		size_t oldlen = preimage->line[i].len;
		int j;

		/* Try fixing the line in the preimage */
		ws_fix_copy(&fixed, orig, oldlen, ws_rule, NULL);

		for (j = fixstart; j < fixed.len; j++) {
			if (!isspace(fixed.buf[j])) {
				ret = 0;
				goto out;
			}
		}

		orig += oldlen;
	}

	/*
	 * Yes, the preimage is based on an older version that still
	 * has whitespace breakages unfixed, and fixing them makes the
	 * hunk match. Update the context lines in the postimage.
*/
	fixed_buf = strbuf_detach(&fixed, &fixed_len);
	update_pre_post_images(preimage, postimage, fixed_buf, fixed_len);
	ret = 1;

out:
	strbuf_release(&fixed);
	return ret;
}

/*
 * Search img for a position where the preimage matches, starting at
 * "line" and alternately probing backwards and forwards from there.
 * Returns the matching line number, or -1 when no position matches.
 */
static int find_pos(struct apply_state *state,
		    struct image *img,
		    struct image *preimage,
		    struct image *postimage,
		    int line,
		    unsigned ws_rule,
		    int match_beginning, int match_end)
{
	int i;
	unsigned long backwards, forwards, current;
	int backwards_lno, forwards_lno, current_lno;

	/*
	 * When running with --allow-overlap, it is possible that a hunk is
	 * seen that pretends to start at the beginning (but no longer does),
	 * and that *still* needs to match the end. So trust `match_end` more
	 * than `match_beginning`.
	 */
	if (state->allow_overlap && match_beginning && match_end &&
	    img->line_nr - preimage->line_nr != 0)
		match_beginning = 0;

	/*
	 * If match_beginning or match_end is specified, there is no
	 * point starting from a wrong line that will never match and
	 * wander around and wait for a match at the specified end.
	 */
	if (match_beginning)
		line = 0;
	else if (match_end)
		line = img->line_nr - preimage->line_nr;

	/*
	 * Because the comparison is unsigned, the following test
	 * will also take care of a negative line number that can
	 * result when match_end and preimage is larger than the target.
	 */
	if ((size_t) line > img->line_nr)
		line = img->line_nr;

	current = 0;
	for (i = 0; i < line; i++)
		current += img->line[i].len;

	/*
	 * There's probably some smart way to do this, but I'll leave
	 * that to the smart and beautiful people. I'm simple and stupid.
	 */
	backwards = current;
	backwards_lno = line;
	forwards = current;
	forwards_lno = line;
	current_lno = line;

	for (i = 0; ; i++) {
		if (match_fragment(state, img, preimage, postimage,
				   current, current_lno, ws_rule,
				   match_beginning, match_end))
			return current_lno;

	again:
		if (backwards_lno == 0 && forwards_lno == img->line_nr)
			break;

		if (i & 1) {
			/* odd iterations probe backwards... */
			if (backwards_lno == 0) {
				i++;
				goto again;
			}
			backwards_lno--;
			backwards -= img->line[backwards_lno].len;
			current = backwards;
			current_lno = backwards_lno;
		} else {
			/* ...even iterations probe forwards */
			if (forwards_lno == img->line_nr) {
				i++;
				goto again;
			}
			forwards += img->line[forwards_lno].len;
			forwards_lno++;
			current = forwards;
			current_lno = forwards_lno;
		}
	}
	return -1;
}

/*
 * The change from "preimage" and "postimage" has been found to
 * apply at applied_pos (counts in line numbers) in "img".
 * Update "img" to remove "preimage" and replace it with "postimage".
 */
static void update_image(struct apply_state *state,
			 struct image *img,
			 int applied_pos,
			 struct image *preimage,
			 struct image *postimage)
{
	/*
	 * remove the copy of preimage at offset in img
	 * and replace it with postimage
	 */
	int i, nr;
	size_t remove_count, insert_count, applied_at = 0;
	size_t result_alloc;
	char *result;
	int preimage_limit;

	/*
	 * If we are removing blank lines at the end of img,
	 * the preimage may extend beyond the end.
	 * If that is the case, we must be careful only to
	 * remove the part of the preimage that falls within
	 * the boundaries of img. Initialize preimage_limit
	 * to the number of lines in the preimage that falls
	 * within the boundaries.
	 */
	preimage_limit = preimage->line_nr;
	if (preimage_limit > img->line_nr - applied_pos)
		preimage_limit = img->line_nr - applied_pos;

	for (i = 0; i < applied_pos; i++)
		applied_at += img->line[i].len;

	remove_count = 0;
	for (i = 0; i < preimage_limit; i++)
		remove_count += img->line[applied_pos + i].len;
	insert_count = postimage->buf.len;

	/* Adjust the contents */
	result_alloc = st_add3(st_sub(img->buf.len, remove_count), insert_count, 1);
	result = xmalloc(result_alloc);
	memcpy(result, img->buf.buf, applied_at);
	memcpy(result + applied_at, postimage->buf.buf, postimage->buf.len);
	memcpy(result + applied_at + postimage->buf.len,
	       img->buf.buf + (applied_at + remove_count),
	       img->buf.len - (applied_at + remove_count));
	strbuf_attach(&img->buf, result,
		      postimage->buf.len + img->buf.len - remove_count,
		      result_alloc);

	/* Adjust the line table */
	nr = img->line_nr + postimage->line_nr - preimage_limit;
	if (preimage_limit < postimage->line_nr)
		/*
		 * NOTE: this knows that we never call image_remove_first_line()
		 * on anything other than pre/post image.
		 */
		REALLOC_ARRAY(img->line, nr);
	if (preimage_limit != postimage->line_nr)
		MOVE_ARRAY(img->line + applied_pos + postimage->line_nr,
			   img->line + applied_pos + preimage_limit,
			   img->line_nr - (applied_pos + preimage_limit));
	COPY_ARRAY(img->line + applied_pos, postimage->line, postimage->line_nr);
	if (!state->allow_overlap)
		for (i = 0; i < postimage->line_nr; i++)
			img->line[applied_pos + i].flag |= LINE_PATCHED;
	img->line_nr = nr;
}

/*
 * Use the patch-hunk text in "frag" to prepare two images (preimage and
 * postimage) for the hunk. Find lines that match "preimage" in "img" and
 * replace the part of "img" with "postimage" text.
 */
static int apply_one_fragment(struct apply_state *state, struct image *img,
			      struct fragment *frag, int inaccurate_eof,
			      unsigned ws_rule, int nth_fragment)
{
	int match_beginning, match_end;
	const char *patch = frag->patch;
	int size = frag->size;
	char *old, *oldlines;
	struct strbuf newlines;
	int new_blank_lines_at_end = 0;
	int found_new_blank_lines_at_end = 0;
	int hunk_linenr = frag->linenr;
	unsigned long leading, trailing;
	int pos, applied_pos;
	struct image preimage = IMAGE_INIT;
	struct image postimage = IMAGE_INIT;

	/* "oldlines" accumulates the raw preimage text; "newlines" the postimage */
	oldlines = xmalloc(size);
	strbuf_init(&newlines, size);
	old = oldlines;

	/*
	 * First pass: walk the hunk text line by line and build the
	 * preimage/postimage pair (swapping +/- when applying in reverse).
	 */
	while (size > 0) {
		char first;
		int len = linelen(patch, size);
		int plen;
		int added_blank_line = 0;
		int is_blank_context = 0;
		size_t start;

		if (!len)
			break;

		/*
		 * "plen" is how much of the line we should use for
		 * the actual patch data. Normally we just remove the
		 * first character on the line, but if the line is
		 * followed by "\ No newline", then we also remove the
		 * last one (which is the newline, of course).
		 */
		plen = len - 1;
		if (len < size && patch[len] == '\\')
			plen--;
		first = *patch;
		if (state->apply_in_reverse) {
			if (first == '-')
				first = '+';
			else if (first == '+')
				first = '-';
		}

		switch (first) {
		case '\n':
			/* Newer GNU diff, empty context line */
			if (plen < 0)
				/* ... followed by '\No newline'; nothing */
				break;
			*old++ = '\n';
			strbuf_addch(&newlines, '\n');
			image_add_line(&preimage, "\n", 1, LINE_COMMON);
			image_add_line(&postimage, "\n", 1, LINE_COMMON);
			is_blank_context = 1;
			break;
		case ' ':
			if (plen && (ws_rule & WS_BLANK_AT_EOF) &&
			    ws_blank_line(patch + 1, plen))
				is_blank_context = 1;
			/* fallthrough */
		case '-':
			/* context and removed lines go into the preimage */
			memcpy(old, patch + 1, plen);
			image_add_line(&preimage, old, plen,
				       (first == ' ' ? LINE_COMMON : 0));
			old += plen;
			if (first == '-')
				break;
			/* fallthrough */
		case '+':
			/* --no-add does not add new lines */
			if (first == '+' && state->no_add)
				break;

			start = newlines.len;
			if (first != '+' ||
			    !state->whitespace_error ||
			    state->ws_error_action != correct_ws_error) {
				strbuf_add(&newlines, patch + 1, plen);
			} else {
				/* fix whitespace errors on added lines as we copy */
				ws_fix_copy(&newlines, patch + 1, plen, ws_rule,
					    &state->applied_after_fixing_ws);
			}
			image_add_line(&postimage, newlines.buf + start,
				       newlines.len - start,
				       (first == '+' ? 0 : LINE_COMMON));
			if (first == '+' &&
			    (ws_rule & WS_BLANK_AT_EOF) &&
			    ws_blank_line(patch + 1, plen))
				added_blank_line = 1;
			break;
		case '@': case '\\':
			/* Ignore it, we already handled it */
			break;
		default:
			if (state->apply_verbosity > verbosity_normal)
				error(_("invalid start of line: '%c'"), first);
			applied_pos = -1;
			goto out;
		}
		/* track a run of blank lines added at the end of the hunk */
		if (added_blank_line) {
			if (!new_blank_lines_at_end)
				found_new_blank_lines_at_end = hunk_linenr;
			new_blank_lines_at_end++;
		}
		else if (is_blank_context)
			;
		else
			new_blank_lines_at_end = 0;
		patch += len;
		size -= len;
		hunk_linenr++;
	}
	/* trim the trailing newline the diff could not represent exactly */
	if (inaccurate_eof &&
	    old > oldlines && old[-1] == '\n' &&
	    newlines.len > 0 && newlines.buf[newlines.len - 1] == '\n') {
		old--;
		strbuf_setlen(&newlines, newlines.len - 1);
		preimage.line[preimage.line_nr - 1].len--;
		postimage.line[postimage.line_nr - 1].len--;
	}

	leading = frag->leading;
	trailing = frag->trailing;

	/*
	 * A hunk to change lines at the beginning would begin with
	 * @@ -1,L +N,M @@
	 * but we need to be careful. -U0 that inserts before the second
	 * line also has this pattern.
	 *
	 * And a hunk to add to an empty file would begin with
	 * @@ -0,0 +N,M @@
	 *
	 * In other words, a hunk that is (frag->oldpos <= 1) with or
	 * without leading context must match at the beginning.
	 */
	match_beginning = (!frag->oldpos ||
			   (frag->oldpos == 1 && !state->unidiff_zero));

	/*
	 * A hunk without trailing lines must match at the end.
	 * However, we simply cannot tell if a hunk must match end
	 * from the lack of trailing lines if the patch was generated
	 * with unidiff without any context.
	 */
	match_end = !state->unidiff_zero && !trailing;

	pos = frag->newpos ? (frag->newpos - 1) : 0;
	strbuf_add(&preimage.buf, oldlines, old - oldlines);
	strbuf_swap(&postimage.buf, &newlines);

	/*
	 * Second pass: search "img" for a place the preimage matches,
	 * progressively relaxing anchoring and then dropping context lines.
	 */
	for (;;) {
		applied_pos = find_pos(state, img, &preimage, &postimage, pos,
				       ws_rule, match_beginning, match_end);

		if (applied_pos >= 0)
			break;

		/* Am I at my context limits? */
		if ((leading <= state->p_context) && (trailing <= state->p_context))
			break;
		if (match_beginning || match_end) {
			match_beginning = match_end = 0;
			continue;
		}

		/*
		 * Reduce the number of context lines; reduce both
		 * leading and trailing if they are equal otherwise
		 * just reduce the larger context.
		 */
		if (leading >= trailing) {
			image_remove_first_line(&preimage);
			image_remove_first_line(&postimage);
			pos--;
			leading--;
		}
		if (trailing > leading) {
			image_remove_last_line(&preimage);
			image_remove_last_line(&postimage);
			trailing--;
		}
	}

	if (applied_pos >= 0) {
		/* the hunk adds blank lines at EOF; report/fix per ws policy */
		if (new_blank_lines_at_end &&
		    preimage.line_nr + applied_pos >= img->line_nr &&
		    (ws_rule & WS_BLANK_AT_EOF) &&
		    state->ws_error_action != nowarn_ws_error) {
			record_ws_error(state, WS_BLANK_AT_EOF, "+", 1,
					found_new_blank_lines_at_end);
			if (state->ws_error_action == correct_ws_error) {
				while (new_blank_lines_at_end--)
					image_remove_last_line(&postimage);
			}
			/*
			 * We would want to prevent write_out_results()
			 * from taking place in apply_patch() that follows
			 * the callchain led us here, which is:
			 * apply_patch->check_patch_list->check_patch->
			 * apply_data->apply_fragments->apply_one_fragment
			 */
			if (state->ws_error_action == die_on_ws_error)
				state->apply = 0;
		}

		if (state->apply_verbosity > verbosity_normal && applied_pos != pos) {
			int offset = applied_pos - pos;
			if (state->apply_in_reverse)
				offset = 0 - offset;
			fprintf_ln(stderr,
				   Q_("Hunk #%d succeeded at %d (offset %d line).",
				      "Hunk #%d succeeded at %d (offset %d lines).",
				      offset),
				   nth_fragment, applied_pos + 1, offset);
		}

		/*
		 * Warn if it was necessary to reduce the number
		 * of context lines.
		 */
		if ((leading != frag->leading ||
		     trailing != frag->trailing) && state->apply_verbosity > verbosity_silent)
			fprintf_ln(stderr,
				   _("Context reduced to (%ld/%ld)"
				     " to apply fragment at %d"),
				   leading, trailing, applied_pos+1);
		update_image(state, img, applied_pos, &preimage, &postimage);
	} else {
		if (state->apply_verbosity > verbosity_normal)
			error(_("while searching for:\n%.*s"),
			      (int)(old - oldlines), oldlines);
	}

out:
	free(oldlines);
	strbuf_release(&newlines);
	image_clear(&preimage);
	image_clear(&postimage);
	/* returns 0 on success, non-zero when the hunk did not apply */
	return (applied_pos < 0);
}

/*
 * Replace the contents of "img" with the result of applying the binary
 * hunk carried in "patch" (either a literal replacement or a delta
 * against the current contents). Returns 0 on success, -1 or error()
 * on failure.
 */
static int apply_binary_fragment(struct apply_state *state,
				 struct image *img,
				 struct patch *patch)
{
	struct fragment *fragment = patch->fragments;
	unsigned long len;
	void *dst;

	if (!fragment)
		return error(_("missing binary patch data for '%s'"),
			     patch->new_name ?
			     patch->new_name :
			     patch->old_name);

	/* Binary patch is irreversible without the optional second hunk */
	if (state->apply_in_reverse) {
		if (!fragment->next)
			return error(_("cannot reverse-apply a binary patch "
				       "without the reverse hunk to '%s'"),
				     patch->new_name
				     ? patch->new_name : patch->old_name);
		fragment = fragment->next;
	}
	switch (fragment->binary_patch_method) {
	case BINARY_DELTA_DEFLATED:
		/* delta against the preimage currently held in img */
		dst = patch_delta(img->buf.buf, img->buf.len, fragment->patch,
				  fragment->size, &len);
		if (!dst)
			return -1;
		image_clear(img);
		strbuf_attach(&img->buf, dst, len, len + 1);
		return 0;
	case BINARY_LITERAL_DEFLATED:
		/* full postimage stored verbatim in the fragment */
		image_clear(img);
		strbuf_add(&img->buf, fragment->patch, fragment->size);
		return 0;
	}
	return -1;
}

/*
 * Replace "img" with the result of applying the binary patch.
 * The binary patch data itself in patch->fragment is still kept
 * but the preimage prepared by the caller in "img" is freed here
 * or in the helper function apply_binary_fragment() this calls.
 */
static int apply_binary(struct apply_state *state,
			struct image *img,
			struct patch *patch)
{
	const char *name = patch->old_name ? patch->old_name : patch->new_name;
	struct object_id oid;
	const unsigned hexsz = the_hash_algo->hexsz;

	/*
	 * For safety, we require patch index line to contain
	 * full hex textual object ID for old and new, at least for now.
	 */
	if (strlen(patch->old_oid_prefix) != hexsz ||
	    strlen(patch->new_oid_prefix) != hexsz ||
	    get_oid_hex(patch->old_oid_prefix, &oid) ||
	    get_oid_hex(patch->new_oid_prefix, &oid))
		return error(_("cannot apply binary patch to '%s' "
			       "without full index line"), name);

	if (patch->old_name) {
		/*
		 * See if the old one matches what the patch
		 * applies to.
		 */
		hash_object_file(the_hash_algo, img->buf.buf, img->buf.len,
				 OBJ_BLOB, &oid);
		if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix))
			return error(_("the patch applies to '%s' (%s), "
				       "which does not match the "
				       "current contents."),
				     name, oid_to_hex(&oid));
	} else {
		/* Otherwise, the old one must be empty. */
		if (img->buf.len)
			return error(_("the patch applies to an empty "
				       "'%s' but it is not empty"), name);
	}

	get_oid_hex(patch->new_oid_prefix, &oid);
	if (is_null_oid(&oid)) {
		image_clear(img);
		return 0; /* deletion patch */
	}

	if (has_object(the_repository, &oid, 0)) {
		/* We already have the postimage */
		enum object_type type;
		unsigned long size;
		char *result;

		result = repo_read_object_file(the_repository, &oid, &type,
					       &size);
		if (!result)
			return error(_("the necessary postimage %s for "
				       "'%s' cannot be read"),
				     patch->new_oid_prefix, name);
		image_clear(img);
		strbuf_attach(&img->buf, result, size, size + 1);
	} else {
		/*
		 * We have verified buf matches the preimage;
		 * apply the patch data to it, which is stored
		 * in the patch->fragments->{patch,size}.
		 */
		if (apply_binary_fragment(state, img, patch))
			return error(_("binary patch does not apply to '%s'"),
				     name);

		/* verify that the result matches */
		hash_object_file(the_hash_algo, img->buf.buf, img->buf.len,
				 OBJ_BLOB, &oid);
		if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix))
			return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
				     name, patch->new_oid_prefix, oid_to_hex(&oid));
	}

	return 0;
}

/*
 * Apply all hunks of "patch" to "img" in order. With --reject, a
 * failed hunk is marked rejected and we keep going (returning 0);
 * binary patches are delegated to apply_binary().
 */
static int apply_fragments(struct apply_state *state, struct image *img, struct patch *patch)
{
	struct fragment *frag = patch->fragments;
	const char *name = patch->old_name ? patch->old_name : patch->new_name;
	unsigned ws_rule = patch->ws_rule;
	unsigned inaccurate_eof = patch->inaccurate_eof;
	int nth = 0;

	if (patch->is_binary)
		return apply_binary(state, img, patch);

	while (frag) {
		nth++;
		if (apply_one_fragment(state, img, frag, inaccurate_eof, ws_rule, nth)) {
			error(_("patch failed: %s:%ld"), name, frag->oldpos);
			if (!state->apply_with_reject)
				return -1;
			frag->rejected = 1;
		}
		frag = frag->next;
	}
	return 0;
}

/*
 * Read the blob at "oid" into "buf"; for a gitlink, synthesize the
 * "Subproject commit <hex>" line instead of reading an object.
 */
static int read_blob_object(struct strbuf *buf, const struct object_id *oid, unsigned mode)
{
	if (S_ISGITLINK(mode)) {
		strbuf_grow(buf, 100);
		strbuf_addf(buf, "Subproject commit %s\n", oid_to_hex(oid));
	} else {
		enum object_type type;
		unsigned long sz;
		char *result;

		result = repo_read_object_file(the_repository, oid, &type,
					       &sz);
		if (!result)
			return -1;
		/* XXX read_sha1_file NUL-terminates */
		strbuf_attach(buf, result, sz, sz + 1);
	}
	return 0;
}

/* Read the contents the index entry "ce" refers to; no-op when ce is NULL */
static int read_file_or_gitlink(const struct cache_entry *ce, struct strbuf *buf)
{
	if (!ce)
		return 0;
	return read_blob_object(buf, &ce->oid, ce->ce_mode);
}

/* Look up the status recorded for "name" in the filename table (or NULL) */
static struct patch *in_fn_table(struct apply_state *state, const char *name)
{
	struct string_list_item *item;

	if (!name)
		return NULL;

	item = string_list_lookup(&state->fn_table, name);
	if (item)
		return (struct patch *)item->util;

	return NULL;
}

/*
 * item->util in the filename table records the status of the path.
 * Usually it points at a patch (whose result records the contents
 * of it after applying it), but it could be PATH_WAS_DELETED for a
 * path that a previously applied patch has already removed, or
 * PATH_TO_BE_DELETED for a path that a later patch would remove.
 *
 * The latter is needed to deal with a case where two paths A and B
 * are swapped by first renaming A to B and then renaming B to A;
 * moving A to B should not be prevented due to presence of B as we
 * will remove it in a later patch.
 */
#define PATH_TO_BE_DELETED ((struct patch *) -2)
#define PATH_WAS_DELETED ((struct patch *) -1)

/* Is this fn_table entry the "a later patch deletes it" sentinel? */
static int to_be_deleted(struct patch *patch)
{
	return patch == PATH_TO_BE_DELETED;
}

/* Is this fn_table entry the "already deleted" sentinel? */
static int was_deleted(struct patch *patch)
{
	return patch == PATH_WAS_DELETED;
}

/* Record the outcome of "patch" in the filename table for later patches */
static void add_to_fn_table(struct apply_state *state, struct patch *patch)
{
	struct string_list_item *item;

	/*
	 * Always add new_name unless patch is a deletion
	 * This should cover the cases for normal diffs,
	 * file creations and copies
	 */
	if (patch->new_name) {
		item = string_list_insert(&state->fn_table, patch->new_name);
		item->util = patch;
	}

	/*
	 * store a failure on rename/deletion cases because
	 * later chunks shouldn't patch old names
	 */
	if ((patch->new_name == NULL) || (patch->is_rename)) {
		item = string_list_insert(&state->fn_table, patch->old_name);
		item->util = PATH_WAS_DELETED;
	}
}

/* Pre-seed the filename table with paths that later patches will delete */
static void prepare_fn_table(struct apply_state *state, struct patch *patch)
{
	/*
	 * store information about incoming file deletion
	 */
	while (patch) {
		if ((patch->new_name == NULL) || (patch->is_rename)) {
			struct string_list_item *item;
			item = string_list_insert(&state->fn_table, patch->old_name);
			item->util = PATH_TO_BE_DELETED;
		}
		patch = patch->next;
	}
}

/* Materialize "ce" in the working tree and lstat() the result into "st" */
static int checkout_target(struct index_state *istate,
			   struct cache_entry *ce, struct stat *st)
{
	struct checkout costate = CHECKOUT_INIT;

	costate.refresh_cache = 1;
	costate.istate = istate;
	if (checkout_entry(ce, &costate, NULL, NULL) ||
	    lstat(ce->name, st))
		return error(_("cannot checkout %s"), ce->name);
	return 0;
}

/*
 * Find an earlier patch in this series that already touched
 * patch->old_name; *gone is set when that patch removed the path.
 */
static struct patch *previous_patch(struct apply_state *state,
				    struct patch *patch,
				    int *gone)
{
	struct patch *previous;

	*gone = 0;
	if (patch->is_copy || patch->is_rename)
		return NULL; /* "git" patches do not depend on the order */

	previous = in_fn_table(state, patch->old_name);
	if (!previous)
		return NULL;

	if (to_be_deleted(previous))
		return NULL; /* the deletion hasn't happened yet */

	if (was_deleted(previous))
		*gone = 1;

	return previous;
}

/*
 * Check that the working tree entry in "st" matches the index entry
 * "ce"; a gitlink merely has to be a directory.
 */
static int verify_index_match(struct apply_state *state,
			      const struct cache_entry *ce,
			      struct stat *st)
{
	if (S_ISGITLINK(ce->ce_mode)) {
		if (!S_ISDIR(st->st_mode))
			return -1;
		return 0;
	}
	return ie_match_stat(state->repo->index, ce, st,
			     CE_MATCH_IGNORE_VALID | CE_MATCH_IGNORE_SKIP_WORKTREE);
}

#define SUBMODULE_PATCH_WITHOUT_INDEX 1

/*
 * Read the current contents of "name" into "buf", from the index
 * (--cached/--index) or from the working tree, refusing to follow a
 * symlink in a leading path component.
 */
static int load_patch_target(struct apply_state *state,
			     struct strbuf *buf,
			     const struct cache_entry *ce,
			     struct stat *st,
			     struct patch *patch,
			     const char *name,
			     unsigned expected_mode)
{
	if (state->cached || state->check_index) {
		if (read_file_or_gitlink(ce, buf))
			return error(_("failed to read %s"), name);
	} else if (name) {
		if (S_ISGITLINK(expected_mode)) {
			if (ce)
				return read_file_or_gitlink(ce, buf);
			else
				return SUBMODULE_PATCH_WITHOUT_INDEX;
		} else if (has_symlink_leading_path(name, strlen(name))) {
			return error(_("reading from '%s' beyond a symbolic link"), name);
		} else {
			if (read_old_data(st, patch, name, buf))
				return error(_("failed to read %s"), name);
		}
	}
	return 0;
}

/*
 * We are about to apply "patch"; populate the "image" with the
 * current version we have, from the working tree or from the index,
 * depending on the situation e.g. --cached/--index. If we are
 * applying a non-git patch that incrementally updates the tree,
 * we read from the result of a previous diff.
 */
static int load_preimage(struct apply_state *state,
			 struct image *image,
			 struct patch *patch, struct stat *st,
			 const struct cache_entry *ce)
{
	struct strbuf buf = STRBUF_INIT;
	size_t len;
	char *img;
	struct patch *previous;
	int status;

	previous = previous_patch(state, patch, &status);
	if (status)
		return error(_("path %s has been renamed/deleted"),
			     patch->old_name);
	if (previous) {
		/* We have a patched copy in memory; use that. */
		strbuf_add(&buf, previous->result, previous->resultsize);
	} else {
		status = load_patch_target(state, &buf, ce, st, patch,
					   patch->old_name, patch->old_mode);
		if (status < 0)
			return status;
		else if (status == SUBMODULE_PATCH_WITHOUT_INDEX) {
			/*
			 * There is no way to apply subproject
			 * patch without looking at the index.
			 * NEEDSWORK: shouldn't this be flagged
			 * as an error???
			 */
			free_fragment_list(patch->fragments);
			patch->fragments = NULL;
		} else if (status) {
			return error(_("failed to read %s"), patch->old_name);
		}
	}

	img = strbuf_detach(&buf, &len);
	image_prepare(image, img, len, !patch->is_binary);
	return 0;
}

/* Replace "image" with the contents of the blob named by "result_id" */
static int resolve_to(struct image *image, const struct object_id *result_id)
{
	unsigned long size;
	enum object_type type;
	char *data;

	image_clear(image);

	data = repo_read_object_file(the_repository, result_id, &type,
				     &size);
	if (!data || type != OBJ_BLOB)
		die("unable to read blob object %s", oid_to_hex(result_id));
	strbuf_attach(&image->buf, data, size, size + 1);

	return 0;
}

/*
 * Three-way merge the blobs "ours" and "theirs" using "base" as the
 * merge base, leaving the (possibly conflicted) result in "image".
 * Returns the ll_merge() status, or -1 on failure.
 */
static int three_way_merge(struct apply_state *state,
			   struct image *image,
			   char *path,
			   const struct object_id *base,
			   const struct object_id *ours,
			   const struct object_id *theirs)
{
	mmfile_t base_file, our_file, their_file;
	struct ll_merge_options merge_opts = LL_MERGE_OPTIONS_INIT;
	mmbuffer_t result = { NULL };
	enum ll_merge_result status;

	/* resolve trivial cases first */
	if (oideq(base, ours))
		return resolve_to(image, theirs);
	else if (oideq(base, theirs) || oideq(ours, theirs))
		return resolve_to(image, ours);

	read_mmblob(&base_file, base);
	read_mmblob(&our_file, ours);
	read_mmblob(&their_file, theirs);
	merge_opts.variant = state->merge_variant;
	status = ll_merge(&result, path,
			  &base_file, "base",
			  &our_file, "ours",
			  &their_file, "theirs",
			  state->repo->index, &merge_opts);
	if (status == LL_MERGE_BINARY_CONFLICT)
		warning("Cannot merge binary files: %s (%s vs. %s)",
			path, "ours", "theirs");
	free(base_file.ptr);
	free(our_file.ptr);
	free(their_file.ptr);
	if (status < 0 || !result.ptr) {
		free(result.ptr);
		return -1;
	}
	image_clear(image);
	strbuf_attach(&image->buf, result.ptr, result.size, result.size);

	return status;
}

/*
 * When directly falling back to add/add three-way merge, we read from
 * the current contents of the new_name. In no cases other than that
 * this function will be called.
 */
static int load_current(struct apply_state *state,
			struct image *image,
			struct patch *patch)
{
	struct strbuf buf = STRBUF_INIT;
	int status, pos;
	size_t len;
	char *img;
	struct stat st;
	struct cache_entry *ce;
	char *name = patch->new_name;
	unsigned mode = patch->new_mode;

	if (!patch->is_new)
		BUG("patch to %s is not a creation", patch->old_name);

	pos = index_name_pos(state->repo->index, name, strlen(name));
	if (pos < 0)
		return error(_("%s: does not exist in index"), name);
	ce = state->repo->index->cache[pos];
	if (lstat(name, &st)) {
		if (errno != ENOENT)
			return error_errno("%s", name);
		/* missing from the working tree: check it out from the index */
		if (checkout_target(state->repo->index, ce, &st))
			return -1;
	}
	if (verify_index_match(state, ce, &st))
		return error(_("%s: does not match index"), name);

	status = load_patch_target(state, &buf, ce, &st, patch, name, mode);
	if (status < 0)
		return status;
	else if (status)
		return -1;
	img = strbuf_detach(&buf, &len);
	image_prepare(image, img, len, !patch->is_binary);
	return 0;
}

/*
 * Attempt a three-way merge for "patch": reconstruct the preimage the
 * patch was made against, apply the patch to it to get "theirs", take
 * the current contents as "ours", and merge. On a conflicted (but
 * successful) merge the three stages are recorded in the patch.
 * Returns 0 on success, negative when the 3-way fallback cannot be
 * used or fails.
 */
static int try_threeway(struct apply_state *state,
			struct image *image,
			struct patch *patch,
			struct stat *st,
			const struct cache_entry *ce)
{
	struct object_id pre_oid, post_oid, our_oid;
	struct strbuf buf = STRBUF_INIT;
	size_t len;
	int status;
	char *img;
	struct image tmp_image = IMAGE_INIT;

	/* No point falling back to 3-way merge in these cases */
	if (patch->is_delete ||
	    S_ISGITLINK(patch->old_mode) || S_ISGITLINK(patch->new_mode) ||
	    (patch->is_new && !patch->direct_to_threeway) ||
	    (patch->is_rename && !patch->lines_added && !patch->lines_deleted))
		return -1;

	/* Preimage the patch was prepared for */
	if (patch->is_new)
		write_object_file("", 0, OBJ_BLOB, &pre_oid);
	else if (repo_get_oid(the_repository, patch->old_oid_prefix, &pre_oid) ||
		 read_blob_object(&buf, &pre_oid, patch->old_mode))
		return error(_("repository lacks the necessary blob to perform 3-way merge."));

	if (state->apply_verbosity > verbosity_silent && patch->direct_to_threeway)
		fprintf(stderr, _("Performing three-way merge...\n"));

	img = strbuf_detach(&buf, &len);
	image_prepare(&tmp_image, img, len, 1);
	/* Apply the patch to get the post image */
	if (apply_fragments(state, &tmp_image, patch) < 0) {
		image_clear(&tmp_image);
		return -1;
	}
	/* post_oid is theirs */
	write_object_file(tmp_image.buf.buf, tmp_image.buf.len, OBJ_BLOB, &post_oid);
	image_clear(&tmp_image);

	/* our_oid is ours */
	if (patch->is_new) {
		if (load_current(state, &tmp_image, patch))
			return error(_("cannot read the current contents of '%s'"),
				     patch->new_name);
	} else {
		if (load_preimage(state, &tmp_image, patch, st, ce))
			return error(_("cannot read the current contents of '%s'"),
				     patch->old_name);
	}
	write_object_file(tmp_image.buf.buf, tmp_image.buf.len, OBJ_BLOB, &our_oid);
	image_clear(&tmp_image);

	/* in-core three-way merge between post and our using pre as base */
	status = three_way_merge(state, image, patch->new_name,
				 &pre_oid, &our_oid, &post_oid);
	if (status < 0) {
		if (state->apply_verbosity > verbosity_silent)
			fprintf(stderr,
				_("Failed to perform three-way merge...\n"));
		return status;
	}

	if (status) {
		patch->conflicted_threeway = 1;
		if (patch->is_new)
			oidclr(&patch->threeway_stage[0], the_repository->hash_algo);
		else
			oidcpy(&patch->threeway_stage[0], &pre_oid);
		oidcpy(&patch->threeway_stage[1], &our_oid);
		oidcpy(&patch->threeway_stage[2], &post_oid);
		if (state->apply_verbosity > verbosity_silent)
			fprintf(stderr,
				_("Applied patch to '%s' with conflicts.\n"),
				patch->new_name);
	} else {
		if (state->apply_verbosity > verbosity_silent)
			fprintf(stderr,
				_("Applied patch to '%s' cleanly.\n"),
				patch->new_name);
	}
	return 0;
}

/*
 * Load the preimage and apply "patch" to it (directly, or via the
 * three-way fallback when enabled), leaving the result in
 * patch->result / patch->resultsize.
 */
static int apply_data(struct apply_state *state, struct patch *patch,
		      struct stat *st, const struct cache_entry *ce)
{
	struct image image = IMAGE_INIT;

	if (load_preimage(state, &image, patch, st, ce) < 0)
		return -1;

	if (!state->threeway || try_threeway(state, &image, patch, st, ce) < 0) {
		if (state->apply_verbosity > verbosity_silent &&
		    state->threeway && !patch->direct_to_threeway)
			fprintf(stderr, _("Falling back to direct application...\n"));

		/* Note: with --reject, apply_fragments() returns 0 */
		if (patch->direct_to_threeway || apply_fragments(state, &image, patch) < 0) {
			image_clear(&image);
			return -1;
		}
	}
	patch->result = strbuf_detach(&image.buf, &patch->resultsize);
	add_to_fn_table(state, patch);
	free(image.line);

	if (0 < patch->is_delete && patch->resultsize)
		return error(_("removal patch leaves file contents"));

	return 0;
}

/*
 * If "patch" that we are looking at modifies or deletes what we have,
 * we would want it not to lose any local modification we have, either
 * in the working tree or in the index.
 *
 * This also decides if a non-git patch is a creation patch or a
 * modification to an existing empty file. We do not check the state
 * of the current tree for a creation patch in this function; the caller
 * check_patch() separately makes sure (and errors out otherwise) that
 * the path the patch creates does not exist in the current tree.
 */
static int check_preimage(struct apply_state *state,
			  struct patch *patch,
			  struct cache_entry **ce,
			  struct stat *st)
{
	const char *old_name = patch->old_name;
	struct patch *previous = NULL;
	int stat_ret = 0, status;
	unsigned st_mode = 0;

	if (!old_name)
		return 0;

	assert(patch->is_new <= 0);
	previous = previous_patch(state, patch, &status);

	if (status)
		return error(_("path %s has been renamed/deleted"), old_name);
	if (previous) {
		/* an earlier patch already produced the contents in-core */
		st_mode = previous->new_mode;
	} else if (!state->cached) {
		stat_ret = lstat(old_name, st);
		if (stat_ret && errno != ENOENT)
			return error_errno("%s", old_name);
	}

	if (state->check_index && !previous) {
		int pos = index_name_pos(state->repo->index, old_name,
					 strlen(old_name));
		if (pos < 0) {
			if (patch->is_new < 0)
				goto is_new;
			return error(_("%s: does not exist in index"), old_name);
		}
		*ce = state->repo->index->cache[pos];
		if (stat_ret < 0) {
			if (checkout_target(state->repo->index, *ce, st))
				return -1;
		}
		if (!state->cached && verify_index_match(state, *ce, st))
			return error(_("%s: does not match index"), old_name);
		if (state->cached)
			st_mode = (*ce)->ce_mode;
	} else if (stat_ret < 0) {
		if (patch->is_new < 0)
			goto is_new;
		return error_errno("%s", old_name);
	}

	if (!state->cached && !previous) {
		if (*ce && !(*ce)->ce_mode)
			BUG("ce_mode == 0 for path '%s'", old_name);

		if (trust_executable_bit)
			st_mode = ce_mode_from_stat(*ce, st->st_mode);
		else if (*ce)
			st_mode = (*ce)->ce_mode;
		else
			st_mode = patch->old_mode;
	}

	if (patch->is_new < 0)
		patch->is_new = 0;
	if (!patch->old_mode)
		patch->old_mode = st_mode;
	if ((st_mode ^ patch->old_mode) & S_IFMT)
		return error(_("%s: wrong type"), old_name);
	if (st_mode != patch->old_mode)
		warning(_("%s has type %o, expected %o"),
			old_name, st_mode, patch->old_mode);
	if (!patch->new_mode && !patch->is_delete)
		patch->new_mode = st_mode;
	return 0;

 is_new:
	/* turn the non-git patch into a creation patch */
	patch->is_new = 1;
	patch->is_delete = 0;
	FREE_AND_NULL(patch->old_name);
	return 0;
}

#define EXISTS_IN_INDEX 1
#define EXISTS_IN_WORKTREE 2
#define EXISTS_IN_INDEX_AS_ITA 3

/*
 * Check whether the path a creation patch wants to create is already
 * there; returns 0 when it is OK to create it, or one of the
 * EXISTS_IN_* codes / error() otherwise.
 */
static int check_to_create(struct apply_state *state,
			   const char *new_name,
			   int ok_if_exists)
{
	struct stat nst;

	if (state->check_index && (!ok_if_exists || !state->cached)) {
		int pos;

		pos = index_name_pos(state->repo->index, new_name, strlen(new_name));
		if (pos >= 0) {
			struct cache_entry *ce = state->repo->index->cache[pos];

			/* allow ITA, as they do not yet exist in the index */
			if (!ok_if_exists && !(ce->ce_flags & CE_INTENT_TO_ADD))
				return EXISTS_IN_INDEX;

			/* ITA entries can never match working tree files */
			if (!state->cached && (ce->ce_flags & CE_INTENT_TO_ADD))
				return EXISTS_IN_INDEX_AS_ITA;
		}
	}

	if (state->cached)
		return 0;

	if (!lstat(new_name, &nst)) {
		if (S_ISDIR(nst.st_mode) || ok_if_exists)
			return 0;
		/*
		 * A leading component of new_name might be a symlink
		 * that is going to be removed with this patch, but
		 * still pointing at somewhere that has the path.
		 * In such a case, path "new_name" does not exist as
		 * far as git is concerned.
		 */
		if (has_symlink_leading_path(new_name, strlen(new_name)))
			return 0;

		return EXISTS_IN_WORKTREE;
	} else if (!is_missing_file_error(errno)) {
		return error_errno("%s", new_name);
	}
	return 0;
}

/* Record which symlinks this patch series removes and which it keeps */
static void prepare_symlink_changes(struct apply_state *state, struct patch *patch)
{
	for ( ; patch; patch = patch->next) {
		if ((patch->old_name && S_ISLNK(patch->old_mode)) &&
		    (patch->is_rename || patch->is_delete))
			/* the symlink at patch->old_name is removed */
			strset_add(&state->removed_symlinks, patch->old_name);

		if (patch->new_name && S_ISLNK(patch->new_mode))
			/* the symlink at patch->new_name is created or remains */
			strset_add(&state->kept_symlinks, patch->new_name);
	}
}

/*
 * Walk the leading path components of "name" (destructively, in the
 * caller-owned strbuf) and report whether any of them is a symlink
 * after this patch series is applied.
 */
static int path_is_beyond_symlink_1(struct apply_state *state, struct strbuf *name)
{
	do {
		while (--name->len && name->buf[name->len] != '/')
			; /* scan backwards */
		if (!name->len)
			break;
		name->buf[name->len] = '\0';
		if (strset_contains(&state->kept_symlinks, name->buf))
			return 1;
		if (strset_contains(&state->removed_symlinks, name->buf))
			/*
			 * This cannot be "return 0", because we may
			 * see a new one created at a higher level.
			 */
			continue;

		/* otherwise, check the preimage */
		if (state->check_index) {
			struct cache_entry *ce;

			ce = index_file_exists(state->repo->index, name->buf,
					       name->len, ignore_case);
			if (ce && S_ISLNK(ce->ce_mode))
				return 1;
		} else {
			struct stat st;
			if (!lstat(name->buf, &st) && S_ISLNK(st.st_mode))
				return 1;
		}
	} while (1);
	return 0;
}

/* Non-destructive wrapper around path_is_beyond_symlink_1() */
static int path_is_beyond_symlink(struct apply_state *state, const char *name_)
{
	int ret;
	struct strbuf name = STRBUF_INIT;

	assert(*name_ != '\0');
	strbuf_addstr(&name, name_);
	ret = path_is_beyond_symlink_1(state, &name);
	strbuf_release(&name);

	return ret;
}

/* Reject patches whose old/new paths fail verify_path() */
static int check_unsafe_path(struct patch *patch)
{
	const char *old_name = NULL;
	const char *new_name = NULL;
	if (patch->is_delete)
		old_name = patch->old_name;
	else if (!patch->is_new && !patch->is_copy)
		old_name = patch->old_name;
	if (!patch->is_delete)
		new_name = patch->new_name;

	if (old_name && !verify_path(old_name, patch->old_mode))
		return error(_("invalid path '%s'"), old_name);
	if (new_name && !verify_path(new_name, patch->new_mode))
		return error(_("invalid path '%s'"), new_name);
	return 0;
}

/*
 * Check and apply the patch in-core; leave the result in patch->result
 * for the caller to write it out to the final destination.
 */
static int check_patch(struct apply_state *state, struct patch *patch)
{
	struct stat st;
	const char *old_name = patch->old_name;
	const char *new_name = patch->new_name;
	const char *name = old_name ? old_name : new_name;
	struct cache_entry *ce = NULL;
	struct patch *tpatch;
	int ok_if_exists;
	int status;

	patch->rejected = 1; /* we will drop this after we succeed */

	status = check_preimage(state, patch, &ce, &st);
	if (status)
		return status;
	old_name = patch->old_name;

	/*
	 * A type-change diff is always split into a patch to delete
	 * old, immediately followed by a patch to create new (see
	 * diff.c::run_diff()); in such a case it is Ok that the entry
	 * to be deleted by the previous patch is still in the working
	 * tree and in the index.
	 *
	 * A patch to swap-rename between A and B would first rename A
	 * to B and then rename B to A. While applying the first one,
	 * the presence of B should not stop A from getting renamed to
	 * B; ask to_be_deleted() about the later rename. Removal of
	 * B and rename from A to B is handled the same way by asking
	 * was_deleted().
	 */
	if ((tpatch = in_fn_table(state, new_name)) &&
	    (was_deleted(tpatch) || to_be_deleted(tpatch)))
		ok_if_exists = 1;
	else
		ok_if_exists = 0;

	if (new_name &&
	    ((0 < patch->is_new) || patch->is_rename || patch->is_copy)) {
		int err = check_to_create(state, new_name, ok_if_exists);

		if (err && state->threeway) {
			/* with --3way, an existing target goes through the merge path */
			patch->direct_to_threeway = 1;
		} else switch (err) {
		case 0:
			break; /* happy */
		case EXISTS_IN_INDEX:
			return error(_("%s: already exists in index"), new_name);
		case EXISTS_IN_INDEX_AS_ITA:
			return error(_("%s: does not match index"), new_name);
		case EXISTS_IN_WORKTREE:
			return error(_("%s: already exists in working directory"),
				     new_name);
		default:
			return err;
		}

		if (!patch->new_mode) {
			if (0 < patch->is_new)
				patch->new_mode = S_IFREG | 0644;
			else
				patch->new_mode = patch->old_mode;
		}
	}

	if (new_name && old_name) {
		int same = !strcmp(old_name, new_name);
		if (!patch->new_mode)
			patch->new_mode = patch->old_mode;
		if ((patch->old_mode ^ patch->new_mode) & S_IFMT) {
			if (same)
				return error(_("new mode (%o) of %s does not "
					       "match old mode (%o)"),
					patch->new_mode, new_name,
					patch->old_mode);
			else
				return error(_("new mode (%o) of %s does not "
					       "match old mode (%o) of %s"),
					patch->new_mode, new_name,
					patch->old_mode, old_name);
		}
	}

	if (!state->unsafe_paths && check_unsafe_path(patch))
		return -128;

	/*
	 * An attempt to read from or delete a path that is beyond a
	 * symbolic link will be prevented by load_patch_target() that
	 * is called at the beginning of apply_data() so we do not
	 * have to worry about a patch marked with "is_delete" bit
	 * here. We however need to make sure that the patch result
	 * is not deposited to a path that is beyond a symbolic link
	 * here.
	 */
	if (!patch->is_delete && path_is_beyond_symlink(state, patch->new_name))
		return error(_("affected file '%s' is beyond a symbolic link"),
			     patch->new_name);

	if (apply_data(state, patch, &st, ce) < 0)
		return error(_("%s: patch does not apply"), name);
	patch->rejected = 0;
	return 0;
}

/* Run check_patch() over the whole series; -128 aborts immediately */
static int check_patch_list(struct apply_state *state, struct patch *patch)
{
	int err = 0;

	prepare_symlink_changes(state, patch);
	prepare_fn_table(state, patch);
	while (patch) {
		int res;

		if (state->apply_verbosity > verbosity_normal)
			say_patch_name(stderr,
				       _("Checking patch %s..."), patch);
		res = check_patch(state, patch);
		if (res == -128)
			return -128;
		err |= res;
		patch = patch->next;
	}
	return err;
}

/* Read the index to apply against (an alternate index file or the default) */
static int read_apply_cache(struct apply_state *state)
{
	if (state->index_file)
		return read_index_from(state->repo->index, state->index_file,
				       repo_get_git_dir(the_repository));
	else
		return repo_read_index(state->repo);
}

/* This function tries to read the object name from the current index */
static int get_current_oid(struct apply_state *state, const char *path,
			   struct object_id *oid)
{
	int pos;

	if (read_apply_cache(state) < 0)
		return -1;
	pos = index_name_pos(state->repo->index, path, strlen(path));
	if (pos < 0)
		return -1;
	oidcpy(oid, &state->repo->index->cache[pos]->oid);
	return 0;
}

static int preimage_oid_in_gitlink_patch(struct patch *p, struct object_id *oid)
{
	/*
	 * A usable gitlink patch has only one fragment (hunk) that looks like:
	 * @@ -1 +1 @@
	 *
-Subproject commit <inner commit object name>
	 * +Subproject commit <inner commit object name>
	 * or
	 * @@ -1 +0,0 @@
	 * -Subproject commit <inner commit object name>
	 * for a removal patch.
	 */
	struct fragment *hunk = p->fragments;
	static const char heading[] = "-Subproject commit ";
	char *preimage;

	if (/* does the patch have only one hunk? */
	    hunk && !hunk->next &&
	    /* is its preimage one line? */
	    hunk->oldpos == 1 && hunk->oldlines == 1 &&
	    /* does preimage begin with the heading? */
	    (preimage = memchr(hunk->patch, '\n', hunk->size)) != NULL &&
	    starts_with(++preimage, heading) &&
	    /* does it record full SHA-1? */
	    !get_oid_hex(preimage + sizeof(heading) - 1, oid) &&
	    preimage[sizeof(heading) + the_hash_algo->hexsz - 1] == '\n' &&
	    /* does the abbreviated name on the index line agree with it? */
	    starts_with(preimage + sizeof(heading) - 1, p->old_oid_prefix))
		return 0; /* it all looks fine */

	/* we may have full object name on the index line */
	return get_oid_hex(p->old_oid_prefix, oid);
}

/* Build an index that contains just the files needed for a 3way merge */
static int build_fake_ancestor(struct apply_state *state, struct patch *list)
{
	struct patch *patch;
	struct index_state result = INDEX_STATE_INIT(state->repo);
	struct lock_file lock = LOCK_INIT;
	int res;

	/* Once we start supporting the reverse patch, it may be
	 * worth showing the new sha1 prefix, but until then... */
	for (patch = list; patch; patch = patch->next) {
		struct object_id oid;
		struct cache_entry *ce;
		const char *name;

		name = patch->old_name ? patch->old_name : patch->new_name;
		if (0 < patch->is_new)
			continue;

		if (S_ISGITLINK(patch->old_mode)) {
			if (!preimage_oid_in_gitlink_patch(patch, &oid))
				; /* ok, the textual part looks sane */
			else
				return error(_("sha1 information is lacking or "
					       "useless for submodule %s"), name);
		} else if (!repo_get_oid_blob(the_repository, patch->old_oid_prefix, &oid)) {
			; /* ok */
		} else if (!patch->lines_added && !patch->lines_deleted) {
			/* mode-only change: update the current */
			if (get_current_oid(state, patch->old_name, &oid))
				return error(_("mode change for %s, which is not "
					       "in current HEAD"), name);
		} else
			return error(_("sha1 information is lacking or useless "
				       "(%s)."), name);

		ce = make_cache_entry(&result, patch->old_mode, &oid, name, 0, 0);
		if (!ce)
			return error(_("make_cache_entry failed for path '%s'"),
				     name);
		if (add_index_entry(&result, ce, ADD_CACHE_OK_TO_ADD)) {
			discard_cache_entry(ce);
			return error(_("could not add %s to temporary index"),
				     name);
		}
	}

	hold_lock_file_for_update(&lock, state->fake_ancestor, LOCK_DIE_ON_ERROR);
	res = write_locked_index(&result, &lock, COMMIT_LOCK);
	discard_index(&result);

	if (res)
		return error(_("could not write temporary index to %s"),
			     state->fake_ancestor);

	return 0;
}

/* --stat output: per-patch stats plus the summary line */
static void stat_patch_list(struct apply_state *state, struct patch *patch)
{
	int files, adds, dels;

	for (files = adds = dels = 0 ; patch ; patch = patch->next) {
		files++;
		adds += patch->lines_added;
		dels += patch->lines_deleted;
		show_stats(state, patch);
	}

	print_stat_summary(stdout, files, adds, dels);
}

/* --numstat output: "added<TAB>deleted<TAB>path" per patch */
static void numstat_patch_list(struct apply_state *state,
			       struct patch *patch)
{
	for ( ; patch; patch = patch->next) {
		const char *name;
		name = patch->new_name ?
patch->new_name : patch->old_name; if (patch->is_binary) printf("-\t-\t"); else printf("%d\t%d\t", patch->lines_added, patch->lines_deleted); write_name_quoted(name, stdout, state->line_termination); } } static void show_file_mode_name(const char *newdelete, unsigned int mode, const char *name) { if (mode) printf(" %s mode %06o %s\n", newdelete, mode, name); else printf(" %s %s\n", newdelete, name); } static void show_mode_change(struct patch *p, int show_name) { if (p->old_mode && p->new_mode && p->old_mode != p->new_mode) { if (show_name) printf(" mode change %06o => %06o %s\n", p->old_mode, p->new_mode, p->new_name); else printf(" mode change %06o => %06o\n", p->old_mode, p->new_mode); } } static void show_rename_copy(struct patch *p) { const char *renamecopy = p->is_rename ? "rename" : "copy"; const char *old_name, *new_name; /* Find common prefix */ old_name = p->old_name; new_name = p->new_name; while (1) { const char *slash_old, *slash_new; slash_old = strchr(old_name, '/'); slash_new = strchr(new_name, '/'); if (!slash_old || !slash_new || slash_old - old_name != slash_new - new_name || memcmp(old_name, new_name, slash_new - new_name)) break; old_name = slash_old + 1; new_name = slash_new + 1; } /* p->old_name through old_name is the common prefix, and old_name and * new_name through the end of names are renames */ if (old_name != p->old_name) printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy, (int)(old_name - p->old_name), p->old_name, old_name, new_name, p->score); else printf(" %s %s => %s (%d%%)\n", renamecopy, p->old_name, p->new_name, p->score); show_mode_change(p, 0); } static void summary_patch_list(struct patch *patch) { struct patch *p; for (p = patch; p; p = p->next) { if (p->is_new) show_file_mode_name("create", p->new_mode, p->new_name); else if (p->is_delete) show_file_mode_name("delete", p->old_mode, p->old_name); else { if (p->is_rename || p->is_copy) show_rename_copy(p); else { if (p->score) { printf(" rewrite %s (%d%%)\n", p->new_name, 
p->score); show_mode_change(p, 0); } else show_mode_change(p, 1); } } } } static void patch_stats(struct apply_state *state, struct patch *patch) { int lines = patch->lines_added + patch->lines_deleted; if (lines > state->max_change) state->max_change = lines; if (patch->old_name) { int len = quote_c_style(patch->old_name, NULL, NULL, 0); if (!len) len = strlen(patch->old_name); if (len > state->max_len) state->max_len = len; } if (patch->new_name) { int len = quote_c_style(patch->new_name, NULL, NULL, 0); if (!len) len = strlen(patch->new_name); if (len > state->max_len) state->max_len = len; } } static int remove_file(struct apply_state *state, struct patch *patch, int rmdir_empty) { if (state->update_index && !state->ita_only) { if (remove_file_from_index(state->repo->index, patch->old_name) < 0) return error(_("unable to remove %s from index"), patch->old_name); } if (!state->cached) { if (!remove_or_warn(patch->old_mode, patch->old_name) && rmdir_empty) { remove_path(patch->old_name); } } return 0; } static int add_index_file(struct apply_state *state, const char *path, unsigned mode, void *buf, unsigned long size) { struct stat st; struct cache_entry *ce; int namelen = strlen(path); ce = make_empty_cache_entry(state->repo->index, namelen); memcpy(ce->name, path, namelen); ce->ce_mode = create_ce_mode(mode); ce->ce_flags = create_ce_flags(0); ce->ce_namelen = namelen; if (state->ita_only) { ce->ce_flags |= CE_INTENT_TO_ADD; set_object_name_for_intent_to_add_entry(ce); } else if (S_ISGITLINK(mode)) { const char *s; if (!skip_prefix(buf, "Subproject commit ", &s) || get_oid_hex(s, &ce->oid)) { discard_cache_entry(ce); return error(_("corrupt patch for submodule %s"), path); } } else { if (!state->cached) { if (lstat(path, &st) < 0) { discard_cache_entry(ce); return error_errno(_("unable to stat newly " "created file '%s'"), path); } fill_stat_cache_info(state->repo->index, ce, &st); } if (write_object_file(buf, size, OBJ_BLOB, &ce->oid) < 0) { 
discard_cache_entry(ce); return error(_("unable to create backing store " "for newly created file %s"), path); } } if (add_index_entry(state->repo->index, ce, ADD_CACHE_OK_TO_ADD) < 0) { discard_cache_entry(ce); return error(_("unable to add cache entry for %s"), path); } return 0; } /* * Returns: * -1 if an unrecoverable error happened * 0 if everything went well * 1 if a recoverable error happened */ static int try_create_file(struct apply_state *state, const char *path, unsigned int mode, const char *buf, unsigned long size) { int fd, res; struct strbuf nbuf = STRBUF_INIT; if (S_ISGITLINK(mode)) { struct stat st; if (!lstat(path, &st) && S_ISDIR(st.st_mode)) return 0; return !!mkdir(path, 0777); } if (has_symlinks && S_ISLNK(mode)) /* Although buf:size is counted string, it also is NUL * terminated. */ return !!create_symlink(state && state->repo ? state->repo->index : NULL, buf, path); fd = open(path, O_CREAT | O_EXCL | O_WRONLY, (mode & 0100) ? 0777 : 0666); if (fd < 0) return 1; if (convert_to_working_tree(state->repo->index, path, buf, size, &nbuf, NULL)) { size = nbuf.len; buf = nbuf.buf; } res = write_in_full(fd, buf, size) < 0; if (res) error_errno(_("failed to write to '%s'"), path); strbuf_release(&nbuf); if (close(fd) < 0 && !res) return error_errno(_("closing file '%s'"), path); return res ? -1 : 0; } /* * We optimistically assume that the directories exist, * which is true 99% of the time anyway. If they don't, * we create them and try again. * * Returns: * -1 on error * 0 otherwise */ static int create_one_file(struct apply_state *state, char *path, unsigned mode, const char *buf, unsigned long size) { char *newpath = NULL; int res; if (state->cached) return 0; /* * We already try to detect whether files are beyond a symlink in our * up-front checks. 
But in the case where symlinks are created by any * of the intermediate hunks it can happen that our up-front checks * didn't yet see the symlink, but at the point of arriving here there * in fact is one. We thus repeat the check for symlinks here. * * Note that this does not make the up-front check obsolete as the * failure mode is different: * * - The up-front checks cause us to abort before we have written * anything into the working directory. So when we exit this way the * working directory remains clean. * * - The checks here happen in the middle of the action where we have * already started to apply the patch. The end result will be a dirty * working directory. * * Ideally, we should update the up-front checks to catch what would * happen when we apply the patch before we damage the working tree. * We have all the information necessary to do so. But for now, as a * part of embargoed security work, having this check would serve as a * reasonable first step. */ if (path_is_beyond_symlink(state, path)) return error(_("affected file '%s' is beyond a symbolic link"), path); res = try_create_file(state, path, mode, buf, size); if (res < 0) return -1; if (!res) return 0; if (errno == ENOENT) { if (safe_create_leading_directories_no_share(path)) return 0; res = try_create_file(state, path, mode, buf, size); if (res < 0) return -1; if (!res) return 0; } if (errno == EEXIST || errno == EACCES) { /* We may be trying to create a file where a directory * used to be. 
*/ struct stat st; if (!lstat(path, &st) && (!S_ISDIR(st.st_mode) || !rmdir(path))) errno = EEXIST; } if (errno == EEXIST) { unsigned int nr = getpid(); for (;;) { newpath = mkpathdup("%s~%u", path, nr); res = try_create_file(state, newpath, mode, buf, size); if (res < 0) goto out; if (!res) { if (!rename(newpath, path)) goto out; unlink_or_warn(newpath); break; } if (errno != EEXIST) break; ++nr; FREE_AND_NULL(newpath); } } res = error_errno(_("unable to write file '%s' mode %o"), path, mode); out: free(newpath); return res; } static int add_conflicted_stages_file(struct apply_state *state, struct patch *patch) { int stage, namelen; unsigned mode; struct cache_entry *ce; if (!state->update_index) return 0; namelen = strlen(patch->new_name); mode = patch->new_mode ? patch->new_mode : (S_IFREG | 0644); remove_file_from_index(state->repo->index, patch->new_name); for (stage = 1; stage < 4; stage++) { if (is_null_oid(&patch->threeway_stage[stage - 1])) continue; ce = make_empty_cache_entry(state->repo->index, namelen); memcpy(ce->name, patch->new_name, namelen); ce->ce_mode = create_ce_mode(mode); ce->ce_flags = create_ce_flags(stage); ce->ce_namelen = namelen; oidcpy(&ce->oid, &patch->threeway_stage[stage - 1]); if (add_index_entry(state->repo->index, ce, ADD_CACHE_OK_TO_ADD) < 0) { discard_cache_entry(ce); return error(_("unable to add cache entry for %s"), patch->new_name); } } return 0; } static int create_file(struct apply_state *state, struct patch *patch) { char *path = patch->new_name; unsigned mode = patch->new_mode; unsigned long size = patch->resultsize; char *buf = patch->result; if (!mode) mode = S_IFREG | 0644; if (create_one_file(state, path, mode, buf, size)) return -1; if (patch->conflicted_threeway) return add_conflicted_stages_file(state, patch); else if (state->update_index) return add_index_file(state, path, mode, buf, size); return 0; } /* phase zero is to remove, phase one is to create */ static int write_out_one_result(struct apply_state 
*state, struct patch *patch, int phase) { if (patch->is_delete > 0) { if (phase == 0) return remove_file(state, patch, 1); return 0; } if (patch->is_new > 0 || patch->is_copy) { if (phase == 1) return create_file(state, patch); return 0; } /* * Rename or modification boils down to the same * thing: remove the old, write the new */ if (phase == 0) return remove_file(state, patch, patch->is_rename); if (phase == 1) return create_file(state, patch); return 0; } static int write_out_one_reject(struct apply_state *state, struct patch *patch) { FILE *rej; char *namebuf; struct fragment *frag; int fd, cnt = 0; struct strbuf sb = STRBUF_INIT; for (cnt = 0, frag = patch->fragments; frag; frag = frag->next) { if (!frag->rejected) continue; cnt++; } if (!cnt) { if (state->apply_verbosity > verbosity_normal) say_patch_name(stderr, _("Applied patch %s cleanly."), patch); return 0; } /* This should not happen, because a removal patch that leaves * contents are marked "rejected" at the patch level. */ if (!patch->new_name) die(_("internal error")); /* Say this even without --verbose */ strbuf_addf(&sb, Q_("Applying patch %%s with %d reject...", "Applying patch %%s with %d rejects...", cnt), cnt); if (state->apply_verbosity > verbosity_silent) say_patch_name(stderr, sb.buf, patch); strbuf_release(&sb); namebuf = xstrfmt("%s.rej", patch->new_name); fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666); if (fd < 0) { if (errno != EEXIST) { error_errno(_("cannot open %s"), namebuf); goto error; } if (unlink(namebuf)) { error_errno(_("cannot unlink '%s'"), namebuf); goto error; } fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666); if (fd < 0) { error_errno(_("cannot open %s"), namebuf); goto error; } } rej = fdopen(fd, "w"); if (!rej) { error_errno(_("cannot open %s"), namebuf); close(fd); goto error; } /* Normal git tools never deal with .rej, so do not pretend * this is a git patch by saying --git or giving extended * headers. 
While at it, maybe please "kompare" that wants * the trailing TAB and some garbage at the end of line ;-). */ fprintf(rej, "diff a/%s b/%s\t(rejected hunks)\n", patch->new_name, patch->new_name); for (cnt = 1, frag = patch->fragments; frag; cnt++, frag = frag->next) { if (!frag->rejected) { if (state->apply_verbosity > verbosity_silent) fprintf_ln(stderr, _("Hunk #%d applied cleanly."), cnt); continue; } if (state->apply_verbosity > verbosity_silent) fprintf_ln(stderr, _("Rejected hunk #%d."), cnt); fprintf(rej, "%.*s", frag->size, frag->patch); if (frag->patch[frag->size-1] != '\n') fputc('\n', rej); } fclose(rej); error: free(namebuf); return -1; } /* * Returns: * -1 if an error happened * 0 if the patch applied cleanly * 1 if the patch did not apply cleanly */ static int write_out_results(struct apply_state *state, struct patch *list) { int phase; int errs = 0; struct patch *l; struct string_list cpath = STRING_LIST_INIT_DUP; for (phase = 0; phase < 2; phase++) { l = list; while (l) { if (l->rejected) errs = 1; else { if (write_out_one_result(state, l, phase)) { string_list_clear(&cpath, 0); return -1; } if (phase == 1) { if (write_out_one_reject(state, l)) errs = 1; if (l->conflicted_threeway) { string_list_append(&cpath, l->new_name); errs = 1; } } } l = l->next; } } if (cpath.nr) { struct string_list_item *item; string_list_sort(&cpath); if (state->apply_verbosity > verbosity_silent) { for_each_string_list_item(item, &cpath) fprintf(stderr, "U %s\n", item->string); } string_list_clear(&cpath, 0); /* * rerere relies on the partially merged result being in the working * tree with conflict markers, but that isn't written with --cached. */ if (!state->cached) repo_rerere(state->repo, 0); } return errs; } /* * Try to apply a patch. 
* * Returns: * -128 if a bad error happened (like patch unreadable) * -1 if patch did not apply and user cannot deal with it * 0 if the patch applied * 1 if the patch did not apply but user might fix it */ static int apply_patch(struct apply_state *state, int fd, const char *filename, int options) { size_t offset; struct strbuf buf = STRBUF_INIT; /* owns the patch text */ struct patch *list = NULL, **listp = &list; int skipped_patch = 0; int res = 0; int flush_attributes = 0; state->patch_input_file = filename; if (read_patch_file(&buf, fd) < 0) return -128; offset = 0; while (offset < buf.len) { struct patch *patch; int nr; CALLOC_ARRAY(patch, 1); patch->inaccurate_eof = !!(options & APPLY_OPT_INACCURATE_EOF); patch->recount = !!(options & APPLY_OPT_RECOUNT); nr = parse_chunk(state, buf.buf + offset, buf.len - offset, patch); if (nr < 0) { free_patch(patch); if (nr == -128) { res = -128; goto end; } break; } if (state->apply_in_reverse) reverse_patches(patch); if (use_patch(state, patch)) { patch_stats(state, patch); if (!list || !state->apply_in_reverse) { *listp = patch; listp = &patch->next; } else { patch->next = list; list = patch; } if ((patch->new_name && ends_with_path_components(patch->new_name, GITATTRIBUTES_FILE)) || (patch->old_name && ends_with_path_components(patch->old_name, GITATTRIBUTES_FILE))) flush_attributes = 1; } else { if (state->apply_verbosity > verbosity_normal) say_patch_name(stderr, _("Skipped patch '%s'."), patch); free_patch(patch); skipped_patch++; } offset += nr; } if (!list && !skipped_patch) { if (!state->allow_empty) { error(_("No valid patches in input (allow with \"--allow-empty\")")); res = -128; } goto end; } if (state->whitespace_error && (state->ws_error_action == die_on_ws_error)) state->apply = 0; state->update_index = (state->check_index || state->ita_only) && state->apply; if (state->update_index && !is_lock_file_locked(&state->lock_file)) { if (state->index_file) hold_lock_file_for_update(&state->lock_file, 
state->index_file, LOCK_DIE_ON_ERROR); else repo_hold_locked_index(state->repo, &state->lock_file, LOCK_DIE_ON_ERROR); } if (state->check_index && read_apply_cache(state) < 0) { error(_("unable to read index file")); res = -128; goto end; } if (state->check || state->apply) { int r = check_patch_list(state, list); if (r == -128) { res = -128; goto end; } if (r < 0 && !state->apply_with_reject) { res = -1; goto end; } } if (state->apply) { int write_res = write_out_results(state, list); if (write_res < 0) { res = -128; goto end; } if (write_res > 0) { /* with --3way, we still need to write the index out */ res = state->apply_with_reject ? -1 : 1; goto end; } } if (state->fake_ancestor && build_fake_ancestor(state, list)) { res = -128; goto end; } if (state->diffstat && state->apply_verbosity > verbosity_silent) stat_patch_list(state, list); if (state->numstat && state->apply_verbosity > verbosity_silent) numstat_patch_list(state, list); if (state->summary && state->apply_verbosity > verbosity_silent) summary_patch_list(list); if (flush_attributes) reset_parsed_attributes(); end: free_patch_list(list); strbuf_release(&buf); string_list_clear(&state->fn_table, 0); return res; } static int apply_option_parse_exclude(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; BUG_ON_OPT_NEG(unset); add_name_limit(state, arg, 1); return 0; } static int apply_option_parse_include(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; BUG_ON_OPT_NEG(unset); add_name_limit(state, arg, 0); state->has_include = 1; return 0; } static int apply_option_parse_p(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; BUG_ON_OPT_NEG(unset); state->p_value = atoi(arg); state->p_value_known = 1; return 0; } static int apply_option_parse_space_change(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; BUG_ON_OPT_ARG(arg); if 
(unset) state->ws_ignore_action = ignore_ws_none; else state->ws_ignore_action = ignore_ws_change; return 0; } static int apply_option_parse_whitespace(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; BUG_ON_OPT_NEG(unset); state->whitespace_option = arg; if (parse_whitespace_option(state, arg)) return -1; return 0; } static int apply_option_parse_directory(const struct option *opt, const char *arg, int unset) { struct apply_state *state = opt->value; BUG_ON_OPT_NEG(unset); strbuf_reset(&state->root); strbuf_addstr(&state->root, arg); strbuf_complete(&state->root, '/'); return 0; } int apply_all_patches(struct apply_state *state, int argc, const char **argv, int options) { int i; int res; int errs = 0; int read_stdin = 1; for (i = 0; i < argc; i++) { const char *arg = argv[i]; char *to_free = NULL; int fd; if (!strcmp(arg, "-")) { res = apply_patch(state, 0, "", options); if (res < 0) goto end; errs |= res; read_stdin = 0; continue; } else arg = to_free = prefix_filename(state->prefix, arg); fd = open(arg, O_RDONLY); if (fd < 0) { error(_("can't open patch '%s': %s"), arg, strerror(errno)); res = -128; free(to_free); goto end; } read_stdin = 0; set_default_whitespace_mode(state); res = apply_patch(state, fd, arg, options); close(fd); free(to_free); if (res < 0) goto end; errs |= res; } set_default_whitespace_mode(state); if (read_stdin) { res = apply_patch(state, 0, "", options); if (res < 0) goto end; errs |= res; } if (state->whitespace_error) { if (state->squelch_whitespace_errors && state->squelch_whitespace_errors < state->whitespace_error) { int squelched = state->whitespace_error - state->squelch_whitespace_errors; warning(Q_("squelched %d whitespace error", "squelched %d whitespace errors", squelched), squelched); } if (state->ws_error_action == die_on_ws_error) { error(Q_("%d line adds whitespace errors.", "%d lines add whitespace errors.", state->whitespace_error), state->whitespace_error); res = -128; goto 
end; } if (state->applied_after_fixing_ws && state->apply) warning(Q_("%d line applied after" " fixing whitespace errors.", "%d lines applied after" " fixing whitespace errors.", state->applied_after_fixing_ws), state->applied_after_fixing_ws); else if (state->whitespace_error) warning(Q_("%d line adds whitespace errors.", "%d lines add whitespace errors.", state->whitespace_error), state->whitespace_error); } if (state->update_index) { res = write_locked_index(state->repo->index, &state->lock_file, COMMIT_LOCK); if (res) { error(_("Unable to write new index file")); res = -128; goto end; } } res = !!errs; end: rollback_lock_file(&state->lock_file); if (state->apply_verbosity <= verbosity_silent) { set_error_routine(state->saved_error_routine); set_warn_routine(state->saved_warn_routine); } if (res > -1) return res; return (res == -1 ? 1 : 128); } int apply_parse_options(int argc, const char **argv, struct apply_state *state, int *force_apply, int *options, const char * const *apply_usage) { struct option builtin_apply_options[] = { OPT_CALLBACK_F(0, "exclude", state, N_("path"), N_("don't apply changes matching the given path"), PARSE_OPT_NONEG, apply_option_parse_exclude), OPT_CALLBACK_F(0, "include", state, N_("path"), N_("apply changes matching the given path"), PARSE_OPT_NONEG, apply_option_parse_include), OPT_CALLBACK('p', NULL, state, N_("num"), N_("remove leading slashes from traditional diff paths"), apply_option_parse_p), OPT_BOOL(0, "no-add", &state->no_add, N_("ignore additions made by the patch")), OPT_BOOL(0, "stat", &state->diffstat, N_("instead of applying the patch, output diffstat for the input")), OPT_NOOP_NOARG(0, "allow-binary-replacement"), OPT_NOOP_NOARG(0, "binary"), OPT_BOOL(0, "numstat", &state->numstat, N_("show number of added and deleted lines in decimal notation")), OPT_BOOL(0, "summary", &state->summary, N_("instead of applying the patch, output a summary for the input")), OPT_BOOL(0, "check", &state->check, N_("instead of applying 
the patch, see if the patch is applicable")), OPT_BOOL(0, "index", &state->check_index, N_("make sure the patch is applicable to the current index")), OPT_BOOL('N', "intent-to-add", &state->ita_only, N_("mark new files with `git add --intent-to-add`")), OPT_BOOL(0, "cached", &state->cached, N_("apply a patch without touching the working tree")), OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths, N_("accept a patch that touches outside the working area"), PARSE_OPT_NOCOMPLETE), OPT_BOOL(0, "apply", force_apply, N_("also apply the patch (use with --stat/--summary/--check)")), OPT_BOOL('3', "3way", &state->threeway, N_( "attempt three-way merge, fall back on normal patch if that fails")), OPT_SET_INT_F(0, "ours", &state->merge_variant, N_("for conflicts, use our version"), XDL_MERGE_FAVOR_OURS, PARSE_OPT_NONEG), OPT_SET_INT_F(0, "theirs", &state->merge_variant, N_("for conflicts, use their version"), XDL_MERGE_FAVOR_THEIRS, PARSE_OPT_NONEG), OPT_SET_INT_F(0, "union", &state->merge_variant, N_("for conflicts, use a union version"), XDL_MERGE_FAVOR_UNION, PARSE_OPT_NONEG), OPT_FILENAME(0, "build-fake-ancestor", &state->fake_ancestor, N_("build a temporary index based on embedded index information")), /* Think twice before adding "--nul" synonym to this */ OPT_SET_INT('z', NULL, &state->line_termination, N_("paths are separated with NUL character"), '\0'), OPT_INTEGER('C', NULL, &state->p_context, N_("ensure at least lines of context match")), OPT_CALLBACK(0, "whitespace", state, N_("action"), N_("detect new or modified lines that have whitespace errors"), apply_option_parse_whitespace), OPT_CALLBACK_F(0, "ignore-space-change", state, NULL, N_("ignore changes in whitespace when finding context"), PARSE_OPT_NOARG, apply_option_parse_space_change), OPT_CALLBACK_F(0, "ignore-whitespace", state, NULL, N_("ignore changes in whitespace when finding context"), PARSE_OPT_NOARG, apply_option_parse_space_change), OPT_BOOL('R', "reverse", &state->apply_in_reverse, N_("apply the 
patch in reverse")), OPT_BOOL(0, "unidiff-zero", &state->unidiff_zero, N_("don't expect at least one line of context")), OPT_BOOL(0, "reject", &state->apply_with_reject, N_("leave the rejected hunks in corresponding *.rej files")), OPT_BOOL(0, "allow-overlap", &state->allow_overlap, N_("allow overlapping hunks")), OPT__VERBOSITY(&state->apply_verbosity), OPT_BIT(0, "inaccurate-eof", options, N_("tolerate incorrectly detected missing new-line at the end of file"), APPLY_OPT_INACCURATE_EOF), OPT_BIT(0, "recount", options, N_("do not trust the line counts in the hunk headers"), APPLY_OPT_RECOUNT), OPT_CALLBACK(0, "directory", state, N_("root"), N_("prepend to all filenames"), apply_option_parse_directory), OPT_BOOL(0, "allow-empty", &state->allow_empty, N_("don't return error for empty patches")), OPT_END() }; argc = parse_options(argc, argv, state->prefix, builtin_apply_options, apply_usage, 0); if (state->merge_variant && !state->threeway) die(_("--ours, --theirs, and --union require --3way")); return argc; } git-cinnabar-0.7.0/git-core/apply.h000064400000000000000000000123631046102023000152020ustar 00000000000000#ifndef APPLY_H #define APPLY_H #include "hash.h" #include "lockfile.h" #include "string-list.h" #include "strmap.h" struct repository; enum apply_ws_error_action { nowarn_ws_error, warn_on_ws_error, die_on_ws_error, correct_ws_error }; enum apply_ws_ignore { ignore_ws_none, ignore_ws_change }; enum apply_verbosity { verbosity_silent = -1, verbosity_normal = 0, verbosity_verbose = 1 }; struct apply_state { const char *prefix; /* Lock file */ struct lock_file lock_file; /* These control what gets looked at and modified */ int apply; /* this is not a dry-run */ int cached; /* apply to the index only */ int check; /* preimage must match working tree, don't actually apply */ int check_index; /* preimage must match the indexed version */ int update_index; /* check_index && apply */ int ita_only; /* add intent-to-add entries to the index */ /* These control 
cosmetic aspect of the output */ int diffstat; /* just show a diffstat, and don't actually apply */ int numstat; /* just show a numeric diffstat, and don't actually apply */ int summary; /* just report creation, deletion, etc, and don't actually apply */ /* These boolean parameters control how the apply is done */ int allow_overlap; int apply_in_reverse; int apply_with_reject; int no_add; int threeway; int unidiff_zero; int unsafe_paths; int allow_empty; /* Other non boolean parameters */ struct repository *repo; const char *index_file; enum apply_verbosity apply_verbosity; int merge_variant; char *fake_ancestor; const char *patch_input_file; int line_termination; struct strbuf root; int p_value; int p_value_known; unsigned int p_context; /* Exclude and include path parameters */ struct string_list limit_by_name; int has_include; /* Various "current state" */ int linenr; /* current line number */ /* * We need to keep track of how symlinks in the preimage are * manipulated by the patches. A patch to add a/b/c where a/b * is a symlink should not be allowed to affect the directory * the symlink points at, but if the same patch removes a/b, * it is perfectly fine, as the patch removes a/b to make room * to create a directory a/b so that a/b/c can be created. */ struct strset removed_symlinks; struct strset kept_symlinks; /* * For "diff-stat" like behaviour, we keep track of the biggest change * we've seen, and the longest filename. That allows us to do simple * scaling. */ int max_change; int max_len; /* * Records filenames that have been touched, in order to handle * the case where more than one patches touch the same file. */ struct string_list fn_table; /* * This is to save reporting routines before using * set_error_routine() or set_warn_routine() to install muting * routines when in verbosity_silent mode. 
*/ void (*saved_error_routine)(const char *err, va_list params); void (*saved_warn_routine)(const char *warn, va_list params); /* These control whitespace errors */ enum apply_ws_error_action ws_error_action; enum apply_ws_ignore ws_ignore_action; const char *whitespace_option; int whitespace_error; int squelch_whitespace_errors; int applied_after_fixing_ws; }; /* * This represents a "patch" to a file, both metainfo changes * such as creation/deletion, filemode and content changes represented * as a series of fragments. */ struct patch { char *new_name, *old_name, *def_name; unsigned int old_mode, new_mode; int is_new, is_delete; /* -1 = unknown, 0 = false, 1 = true */ int rejected; unsigned ws_rule; int lines_added, lines_deleted; int score; int extension_linenr; /* first line specifying delete/new/rename/copy */ unsigned int is_toplevel_relative:1; unsigned int inaccurate_eof:1; unsigned int is_binary:1; unsigned int is_copy:1; unsigned int is_rename:1; unsigned int recount:1; unsigned int conflicted_threeway:1; unsigned int direct_to_threeway:1; unsigned int crlf_in_old:1; struct fragment *fragments; char *result; size_t resultsize; char old_oid_prefix[GIT_MAX_HEXSZ + 1]; char new_oid_prefix[GIT_MAX_HEXSZ + 1]; struct patch *next; /* three-way fallback result */ struct object_id threeway_stage[3]; }; int apply_parse_options(int argc, const char **argv, struct apply_state *state, int *force_apply, int *options, const char * const *apply_usage); int init_apply_state(struct apply_state *state, struct repository *repo, const char *prefix); void clear_apply_state(struct apply_state *state); int check_apply_state(struct apply_state *state, int force_apply); /* * Parse a git diff header, starting at line. Fills the relevant * metadata information in 'struct patch'. * * Returns -1 on failure, the length of the parsed header otherwise. 
*/ int parse_git_diff_header(struct strbuf *root, int *linenr, int p_value, const char *line, int len, unsigned int size, struct patch *patch); void release_patch(struct patch *patch); /* * Some aspects of the apply behavior are controlled by the following * bits in the "options" parameter passed to apply_all_patches(). */ #define APPLY_OPT_INACCURATE_EOF (1<<0) /* accept inaccurate eof */ #define APPLY_OPT_RECOUNT (1<<1) /* accept inaccurate line count */ int apply_all_patches(struct apply_state *state, int argc, const char **argv, int options); #endif git-cinnabar-0.7.0/git-core/archive-tar.c000064400000000000000000000337501046102023000162600ustar 00000000000000/* * Copyright (c) 2005, 2006 Rene Scharfe */ #define USE_THE_REPOSITORY_VARIABLE #include "git-compat-util.h" #include "config.h" #include "gettext.h" #include "git-zlib.h" #include "hex.h" #include "tar.h" #include "archive.h" #include "object-store-ll.h" #include "strbuf.h" #include "streaming.h" #include "run-command.h" #include "write-or-die.h" #define RECORDSIZE (512) #define BLOCKSIZE (RECORDSIZE * 20) static char block[BLOCKSIZE]; static unsigned long offset; static int tar_umask = 002; static int write_tar_filter_archive(const struct archiver *ar, struct archiver_args *args); /* * This is the max value that a ustar size header can specify, as it is fixed * at 11 octal digits. POSIX specifies that we switch to extended headers at * this size. * * Likewise for the mtime (which happens to use a buffer of the same size). 
*/ #if ULONG_MAX == 0xFFFFFFFF #define USTAR_MAX_SIZE ULONG_MAX #else #define USTAR_MAX_SIZE 077777777777UL #endif #if TIME_MAX == 0xFFFFFFFF #define USTAR_MAX_MTIME TIME_MAX #else #define USTAR_MAX_MTIME 077777777777ULL #endif static void tar_write_block(const void *buf) { write_or_die(1, buf, BLOCKSIZE); } static void (*write_block)(const void *) = tar_write_block; /* writes out the whole block, but only if it is full */ static void write_if_needed(void) { if (offset == BLOCKSIZE) { write_block(block); offset = 0; } } /* * queues up writes, so that all our write(2) calls write exactly one * full block; pads writes to RECORDSIZE */ static void do_write_blocked(const void *data, unsigned long size) { const char *buf = data; if (offset) { unsigned long chunk = BLOCKSIZE - offset; if (size < chunk) chunk = size; memcpy(block + offset, buf, chunk); size -= chunk; offset += chunk; buf += chunk; write_if_needed(); } while (size >= BLOCKSIZE) { write_block(buf); size -= BLOCKSIZE; buf += BLOCKSIZE; } if (size) { memcpy(block + offset, buf, size); offset += size; } } static void finish_record(void) { unsigned long tail; tail = offset % RECORDSIZE; if (tail) { memset(block + offset, 0, RECORDSIZE - tail); offset += RECORDSIZE - tail; } write_if_needed(); } static void write_blocked(const void *data, unsigned long size) { do_write_blocked(data, size); finish_record(); } /* * The end of tar archives is marked by 2*512 nul bytes and after that * follows the rest of the block (if any). 
*/ static void write_trailer(void) { int tail = BLOCKSIZE - offset; memset(block + offset, 0, tail); write_block(block); if (tail < 2 * RECORDSIZE) { memset(block, 0, offset); write_block(block); } } /* * queues up writes, so that all our write(2) calls write exactly one * full block; pads writes to RECORDSIZE */ static int stream_blocked(struct repository *r, const struct object_id *oid) { struct git_istream *st; enum object_type type; unsigned long sz; char buf[BLOCKSIZE]; ssize_t readlen; st = open_istream(r, oid, &type, &sz, NULL); if (!st) return error(_("cannot stream blob %s"), oid_to_hex(oid)); for (;;) { readlen = read_istream(st, buf, sizeof(buf)); if (readlen <= 0) break; do_write_blocked(buf, readlen); } close_istream(st); if (!readlen) finish_record(); return readlen; } /* * pax extended header records have the format "%u %s=%s\n". %u contains * the size of the whole string (including the %u), the first %s is the * keyword, the second one is the value. This function constructs such a * string and appends it to a struct strbuf. */ static void strbuf_append_ext_header(struct strbuf *sb, const char *keyword, const char *value, size_t valuelen) { size_t orig_len = sb->len; size_t len, tmp; /* "%u %s=%s\n" */ len = 1 + 1 + strlen(keyword) + 1 + valuelen + 1; for (tmp = 1; len / 10 >= tmp; tmp *= 10) len++; strbuf_grow(sb, len); strbuf_addf(sb, "%"PRIuMAX" %s=", (uintmax_t)len, keyword); strbuf_add(sb, value, valuelen); strbuf_addch(sb, '\n'); if (len != sb->len - orig_len) BUG("pax extended header length miscalculated as %"PRIuMAX ", should be %"PRIuMAX, (uintmax_t)len, (uintmax_t)(sb->len - orig_len)); } /* * Like strbuf_append_ext_header, but for numeric values. 
*/ static void strbuf_append_ext_header_uint(struct strbuf *sb, const char *keyword, uintmax_t value) { char buf[40]; /* big enough for 2^128 in decimal, plus NUL */ int len; len = xsnprintf(buf, sizeof(buf), "%"PRIuMAX, value); strbuf_append_ext_header(sb, keyword, buf, len); } static unsigned int ustar_header_chksum(const struct ustar_header *header) { const unsigned char *p = (const unsigned char *)header; unsigned int chksum = 0; while (p < (const unsigned char *)header->chksum) chksum += *p++; chksum += sizeof(header->chksum) * ' '; p += sizeof(header->chksum); while (p < (const unsigned char *)header + sizeof(struct ustar_header)) chksum += *p++; return chksum; } static size_t get_path_prefix(const char *path, size_t pathlen, size_t maxlen) { size_t i = pathlen; if (i > 1 && path[i - 1] == '/') i--; if (i > maxlen) i = maxlen; do { i--; } while (i > 0 && path[i] != '/'); return i; } static void prepare_header(struct archiver_args *args, struct ustar_header *header, unsigned int mode, unsigned long size) { xsnprintf(header->mode, sizeof(header->mode), "%07o", mode & 07777); xsnprintf(header->size, sizeof(header->size), "%011"PRIoMAX , S_ISREG(mode) ? 
(uintmax_t)size : (uintmax_t)0); xsnprintf(header->mtime, sizeof(header->mtime), "%011lo", (unsigned long) args->time); xsnprintf(header->uid, sizeof(header->uid), "%07o", 0); xsnprintf(header->gid, sizeof(header->gid), "%07o", 0); strlcpy(header->uname, "root", sizeof(header->uname)); strlcpy(header->gname, "root", sizeof(header->gname)); xsnprintf(header->devmajor, sizeof(header->devmajor), "%07o", 0); xsnprintf(header->devminor, sizeof(header->devminor), "%07o", 0); memcpy(header->magic, "ustar", 6); memcpy(header->version, "00", 2); xsnprintf(header->chksum, sizeof(header->chksum), "%07o", ustar_header_chksum(header)); } static void write_extended_header(struct archiver_args *args, const struct object_id *oid, const void *buffer, unsigned long size) { struct ustar_header header; unsigned int mode; memset(&header, 0, sizeof(header)); *header.typeflag = TYPEFLAG_EXT_HEADER; mode = 0100666; xsnprintf(header.name, sizeof(header.name), "%s.paxheader", oid_to_hex(oid)); prepare_header(args, &header, mode, size); write_blocked(&header, sizeof(header)); write_blocked(buffer, size); } static int write_tar_entry(struct archiver_args *args, const struct object_id *oid, const char *path, size_t pathlen, unsigned int mode, void *buffer, unsigned long size) { struct ustar_header header; struct strbuf ext_header = STRBUF_INIT; unsigned long size_in_header; int err = 0; memset(&header, 0, sizeof(header)); if (S_ISDIR(mode) || S_ISGITLINK(mode)) { *header.typeflag = TYPEFLAG_DIR; mode = (mode | 0777) & ~tar_umask; } else if (S_ISLNK(mode)) { *header.typeflag = TYPEFLAG_LNK; mode |= 0777; } else if (S_ISREG(mode)) { *header.typeflag = TYPEFLAG_REG; mode = (mode | ((mode & 0100) ? 
0777 : 0666)) & ~tar_umask; } else { return error(_("unsupported file mode: 0%o (SHA1: %s)"), mode, oid_to_hex(oid)); } if (pathlen > sizeof(header.name)) { size_t plen = get_path_prefix(path, pathlen, sizeof(header.prefix)); size_t rest = pathlen - plen - 1; if (plen > 0 && rest <= sizeof(header.name)) { memcpy(header.prefix, path, plen); memcpy(header.name, path + plen + 1, rest); } else { xsnprintf(header.name, sizeof(header.name), "%s.data", oid_to_hex(oid)); strbuf_append_ext_header(&ext_header, "path", path, pathlen); } } else memcpy(header.name, path, pathlen); if (S_ISLNK(mode)) { if (size > sizeof(header.linkname)) { xsnprintf(header.linkname, sizeof(header.linkname), "see %s.paxheader", oid_to_hex(oid)); strbuf_append_ext_header(&ext_header, "linkpath", buffer, size); } else memcpy(header.linkname, buffer, size); } size_in_header = size; if (S_ISREG(mode) && size > USTAR_MAX_SIZE) { size_in_header = 0; strbuf_append_ext_header_uint(&ext_header, "size", size); } prepare_header(args, &header, mode, size_in_header); if (ext_header.len > 0) { write_extended_header(args, oid, ext_header.buf, ext_header.len); } strbuf_release(&ext_header); write_blocked(&header, sizeof(header)); if (S_ISREG(mode) && size > 0) { if (buffer) write_blocked(buffer, size); else err = stream_blocked(args->repo, oid); } return err; } static void write_global_extended_header(struct archiver_args *args) { const struct object_id *oid = args->commit_oid; struct strbuf ext_header = STRBUF_INIT; struct ustar_header header; unsigned int mode; if (oid) strbuf_append_ext_header(&ext_header, "comment", oid_to_hex(oid), the_hash_algo->hexsz); if (args->time > USTAR_MAX_MTIME) { strbuf_append_ext_header_uint(&ext_header, "mtime", args->time); args->time = USTAR_MAX_MTIME; } if (!ext_header.len) return; memset(&header, 0, sizeof(header)); *header.typeflag = TYPEFLAG_GLOBAL_HEADER; mode = 0100666; xsnprintf(header.name, sizeof(header.name), "pax_global_header"); prepare_header(args, &header, mode, 
ext_header.len); write_blocked(&header, sizeof(header)); write_blocked(ext_header.buf, ext_header.len); strbuf_release(&ext_header); } static struct archiver **tar_filters; static int nr_tar_filters; static int alloc_tar_filters; static struct archiver *find_tar_filter(const char *name, size_t len) { int i; for (i = 0; i < nr_tar_filters; i++) { struct archiver *ar = tar_filters[i]; if (!xstrncmpz(ar->name, name, len)) return ar; } return NULL; } static int tar_filter_config(const char *var, const char *value, void *data UNUSED) { struct archiver *ar; const char *name; const char *type; size_t namelen; if (parse_config_key(var, "tar", &name, &namelen, &type) < 0 || !name) return 0; ar = find_tar_filter(name, namelen); if (!ar) { CALLOC_ARRAY(ar, 1); ar->name = xmemdupz(name, namelen); ar->write_archive = write_tar_filter_archive; ar->flags = ARCHIVER_WANT_COMPRESSION_LEVELS | ARCHIVER_HIGH_COMPRESSION_LEVELS; ALLOC_GROW(tar_filters, nr_tar_filters + 1, alloc_tar_filters); tar_filters[nr_tar_filters++] = ar; } if (!strcmp(type, "command")) { if (!value) return config_error_nonbool(var); free(ar->filter_command); ar->filter_command = xstrdup(value); return 0; } if (!strcmp(type, "remote")) { if (git_config_bool(var, value)) ar->flags |= ARCHIVER_REMOTE; else ar->flags &= ~ARCHIVER_REMOTE; return 0; } return 0; } static int git_tar_config(const char *var, const char *value, const struct config_context *ctx, void *cb) { if (!strcmp(var, "tar.umask")) { if (value && !strcmp(value, "user")) { tar_umask = umask(0); umask(tar_umask); } else { tar_umask = git_config_int(var, value, ctx->kvi); } return 0; } return tar_filter_config(var, value, cb); } static int write_tar_archive(const struct archiver *ar UNUSED, struct archiver_args *args) { int err = 0; write_global_extended_header(args); err = write_archive_entries(args, write_tar_entry); if (!err) write_trailer(); return err; } static git_zstream gzstream; static unsigned char outbuf[16384]; static void tgz_deflate(int 
flush) { while (gzstream.avail_in || flush == Z_FINISH) { int status = git_deflate(&gzstream, flush); if (!gzstream.avail_out || status == Z_STREAM_END) { write_or_die(1, outbuf, gzstream.next_out - outbuf); gzstream.next_out = outbuf; gzstream.avail_out = sizeof(outbuf); if (status == Z_STREAM_END) break; } if (status != Z_OK && status != Z_BUF_ERROR) die(_("deflate error (%d)"), status); } } static void tgz_write_block(const void *data) { gzstream.next_in = (void *)data; gzstream.avail_in = BLOCKSIZE; tgz_deflate(Z_NO_FLUSH); } static const char internal_gzip_command[] = "git archive gzip"; static int write_tar_filter_archive(const struct archiver *ar, struct archiver_args *args) { #if ZLIB_VERNUM >= 0x1221 struct gz_header_s gzhead = { .os = 3 }; /* Unix, for reproducibility */ #endif struct strbuf cmd = STRBUF_INIT; struct child_process filter = CHILD_PROCESS_INIT; int r; if (!ar->filter_command) BUG("tar-filter archiver called with no filter defined"); if (!strcmp(ar->filter_command, internal_gzip_command)) { write_block = tgz_write_block; git_deflate_init_gzip(&gzstream, args->compression_level); #if ZLIB_VERNUM >= 0x1221 if (deflateSetHeader(&gzstream.z, &gzhead) != Z_OK) BUG("deflateSetHeader() called too late"); #endif gzstream.next_out = outbuf; gzstream.avail_out = sizeof(outbuf); r = write_tar_archive(ar, args); tgz_deflate(Z_FINISH); git_deflate_end(&gzstream); return r; } strbuf_addstr(&cmd, ar->filter_command); if (args->compression_level >= 0) strbuf_addf(&cmd, " -%d", args->compression_level); strvec_push(&filter.args, cmd.buf); filter.use_shell = 1; filter.in = -1; filter.silent_exec_failure = 1; if (start_command(&filter) < 0) die_errno(_("unable to start '%s' filter"), cmd.buf); close(1); if (dup2(filter.in, 1) < 0) die_errno(_("unable to redirect descriptor")); close(filter.in); r = write_tar_archive(ar, args); close(1); if (finish_command(&filter) != 0) die(_("'%s' filter reported error"), cmd.buf); strbuf_release(&cmd); return r; } static 
struct archiver tar_archiver = { .name = "tar", .write_archive = write_tar_archive, .flags = ARCHIVER_REMOTE, }; void init_tar_archiver(void) { int i; register_archiver(&tar_archiver); tar_filter_config("tar.tgz.command", internal_gzip_command, NULL); tar_filter_config("tar.tgz.remote", "true", NULL); tar_filter_config("tar.tar.gz.command", internal_gzip_command, NULL); tar_filter_config("tar.tar.gz.remote", "true", NULL); git_config(git_tar_config, NULL); for (i = 0; i < nr_tar_filters; i++) { /* omit any filters that never had a command configured */ if (tar_filters[i]->filter_command) register_archiver(tar_filters[i]); } } git-cinnabar-0.7.0/git-core/archive-zip.c000064400000000000000000000425231046102023000162720ustar 00000000000000/* * Copyright (c) 2006 Rene Scharfe */ #define USE_THE_REPOSITORY_VARIABLE #include "git-compat-util.h" #include "config.h" #include "archive.h" #include "gettext.h" #include "git-zlib.h" #include "hex.h" #include "streaming.h" #include "utf8.h" #include "object-store-ll.h" #include "strbuf.h" #include "userdiff.h" #include "write-or-die.h" #include "xdiff-interface.h" #include "date.h" static int zip_date; static int zip_time; /* We only care about the "buf" part here. 
*/ static struct strbuf zip_dir; static uintmax_t zip_offset; static uint64_t zip_dir_entries; static unsigned int max_creator_version; #define ZIP_STREAM (1 << 3) #define ZIP_UTF8 (1 << 11) enum zip_method { ZIP_METHOD_STORE = 0, ZIP_METHOD_DEFLATE = 8 }; struct zip_local_header { unsigned char magic[4]; unsigned char version[2]; unsigned char flags[2]; unsigned char compression_method[2]; unsigned char mtime[2]; unsigned char mdate[2]; unsigned char crc32[4]; unsigned char compressed_size[4]; unsigned char size[4]; unsigned char filename_length[2]; unsigned char extra_length[2]; unsigned char _end[1]; }; struct zip_data_desc { unsigned char magic[4]; unsigned char crc32[4]; unsigned char compressed_size[4]; unsigned char size[4]; unsigned char _end[1]; }; struct zip64_data_desc { unsigned char magic[4]; unsigned char crc32[4]; unsigned char compressed_size[8]; unsigned char size[8]; unsigned char _end[1]; }; struct zip_dir_trailer { unsigned char magic[4]; unsigned char disk[2]; unsigned char directory_start_disk[2]; unsigned char entries_on_this_disk[2]; unsigned char entries[2]; unsigned char size[4]; unsigned char offset[4]; unsigned char comment_length[2]; unsigned char _end[1]; }; struct zip_extra_mtime { unsigned char magic[2]; unsigned char extra_size[2]; unsigned char flags[1]; unsigned char mtime[4]; unsigned char _end[1]; }; struct zip64_extra { unsigned char magic[2]; unsigned char extra_size[2]; unsigned char size[8]; unsigned char compressed_size[8]; unsigned char _end[1]; }; struct zip64_dir_trailer { unsigned char magic[4]; unsigned char record_size[8]; unsigned char creator_version[2]; unsigned char version[2]; unsigned char disk[4]; unsigned char directory_start_disk[4]; unsigned char entries_on_this_disk[8]; unsigned char entries[8]; unsigned char size[8]; unsigned char offset[8]; unsigned char _end[1]; }; struct zip64_dir_trailer_locator { unsigned char magic[4]; unsigned char disk[4]; unsigned char offset[8]; unsigned char number_of_disks[4]; 
unsigned char _end[1]; }; /* * On ARM, padding is added at the end of the struct, so a simple * sizeof(struct ...) reports two bytes more than the payload size * we're interested in. */ #define ZIP_LOCAL_HEADER_SIZE offsetof(struct zip_local_header, _end) #define ZIP_DATA_DESC_SIZE offsetof(struct zip_data_desc, _end) #define ZIP64_DATA_DESC_SIZE offsetof(struct zip64_data_desc, _end) #define ZIP_DIR_HEADER_SIZE offsetof(struct zip_dir_header, _end) #define ZIP_DIR_TRAILER_SIZE offsetof(struct zip_dir_trailer, _end) #define ZIP_EXTRA_MTIME_SIZE offsetof(struct zip_extra_mtime, _end) #define ZIP_EXTRA_MTIME_PAYLOAD_SIZE \ (ZIP_EXTRA_MTIME_SIZE - offsetof(struct zip_extra_mtime, flags)) #define ZIP64_EXTRA_SIZE offsetof(struct zip64_extra, _end) #define ZIP64_EXTRA_PAYLOAD_SIZE \ (ZIP64_EXTRA_SIZE - offsetof(struct zip64_extra, size)) #define ZIP64_DIR_TRAILER_SIZE offsetof(struct zip64_dir_trailer, _end) #define ZIP64_DIR_TRAILER_RECORD_SIZE \ (ZIP64_DIR_TRAILER_SIZE - \ offsetof(struct zip64_dir_trailer, creator_version)) #define ZIP64_DIR_TRAILER_LOCATOR_SIZE \ offsetof(struct zip64_dir_trailer_locator, _end) static void copy_le16(unsigned char *dest, unsigned int n) { dest[0] = 0xff & n; dest[1] = 0xff & (n >> 010); } static void copy_le32(unsigned char *dest, unsigned int n) { dest[0] = 0xff & n; dest[1] = 0xff & (n >> 010); dest[2] = 0xff & (n >> 020); dest[3] = 0xff & (n >> 030); } static void copy_le64(unsigned char *dest, uint64_t n) { dest[0] = 0xff & n; dest[1] = 0xff & (n >> 010); dest[2] = 0xff & (n >> 020); dest[3] = 0xff & (n >> 030); dest[4] = 0xff & (n >> 040); dest[5] = 0xff & (n >> 050); dest[6] = 0xff & (n >> 060); dest[7] = 0xff & (n >> 070); } static uint64_t clamp_max(uint64_t n, uint64_t max, int *clamped) { if (n <= max) return n; *clamped = 1; return max; } static void copy_le16_clamp(unsigned char *dest, uint64_t n, int *clamped) { copy_le16(dest, clamp_max(n, 0xffff, clamped)); } static void copy_le32_clamp(unsigned char *dest, uint64_t n, 
int *clamped) { copy_le32(dest, clamp_max(n, 0xffffffff, clamped)); } static int strbuf_add_le(struct strbuf *sb, size_t size, uintmax_t n) { while (size-- > 0) { strbuf_addch(sb, n & 0xff); n >>= 8; } return -!!n; } static uint32_t clamp32(uintmax_t n) { const uintmax_t max = 0xffffffff; return (n < max) ? n : max; } static void *zlib_deflate_raw(void *data, unsigned long size, int compression_level, unsigned long *compressed_size) { git_zstream stream; unsigned long maxsize; void *buffer; int result; git_deflate_init_raw(&stream, compression_level); maxsize = git_deflate_bound(&stream, size); buffer = xmalloc(maxsize); stream.next_in = data; stream.avail_in = size; stream.next_out = buffer; stream.avail_out = maxsize; do { result = git_deflate(&stream, Z_FINISH); } while (result == Z_OK); if (result != Z_STREAM_END) { free(buffer); return NULL; } git_deflate_end(&stream); *compressed_size = stream.total_out; return buffer; } static void write_zip_data_desc(unsigned long size, unsigned long compressed_size, unsigned long crc) { if (size >= 0xffffffff || compressed_size >= 0xffffffff) { struct zip64_data_desc trailer; copy_le32(trailer.magic, 0x08074b50); copy_le32(trailer.crc32, crc); copy_le64(trailer.compressed_size, compressed_size); copy_le64(trailer.size, size); write_or_die(1, &trailer, ZIP64_DATA_DESC_SIZE); zip_offset += ZIP64_DATA_DESC_SIZE; } else { struct zip_data_desc trailer; copy_le32(trailer.magic, 0x08074b50); copy_le32(trailer.crc32, crc); copy_le32(trailer.compressed_size, compressed_size); copy_le32(trailer.size, size); write_or_die(1, &trailer, ZIP_DATA_DESC_SIZE); zip_offset += ZIP_DATA_DESC_SIZE; } } static void set_zip_header_data_desc(struct zip_local_header *header, unsigned long size, unsigned long compressed_size, unsigned long crc) { copy_le32(header->crc32, crc); copy_le32(header->compressed_size, compressed_size); copy_le32(header->size, size); } static int has_only_ascii(const char *s) { for (;;) { int c = *s++; if (c == '\0') return 
1; if (!isascii(c)) return 0; } } static int entry_is_binary(struct index_state *istate, const char *path, const void *buffer, size_t size) { struct userdiff_driver *driver = userdiff_find_by_path(istate, path); if (!driver) driver = userdiff_find_by_name("default"); if (driver->binary != -1) return driver->binary; return buffer_is_binary(buffer, size); } #define STREAM_BUFFER_SIZE (1024 * 16) static int write_zip_entry(struct archiver_args *args, const struct object_id *oid, const char *path, size_t pathlen, unsigned int mode, void *buffer, unsigned long size) { struct zip_local_header header; uintmax_t offset = zip_offset; struct zip_extra_mtime extra; struct zip64_extra extra64; size_t header_extra_size = ZIP_EXTRA_MTIME_SIZE; int need_zip64_extra = 0; unsigned long attr2; unsigned long compressed_size; unsigned long crc; enum zip_method method; unsigned char *out; void *deflated = NULL; struct git_istream *stream = NULL; unsigned long flags = 0; int is_binary = -1; const char *path_without_prefix = path + args->baselen; unsigned int creator_version = 0; unsigned int version_needed = 10; size_t zip_dir_extra_size = ZIP_EXTRA_MTIME_SIZE; size_t zip64_dir_extra_payload_size = 0; crc = crc32(0, NULL, 0); if (!has_only_ascii(path)) { if (is_utf8(path)) flags |= ZIP_UTF8; else warning(_("path is not valid UTF-8: %s"), path); } if (pathlen > 0xffff) { return error(_("path too long (%d chars, SHA1: %s): %s"), (int)pathlen, oid_to_hex(oid), path); } if (S_ISDIR(mode) || S_ISGITLINK(mode)) { method = ZIP_METHOD_STORE; attr2 = 16; out = NULL; compressed_size = 0; } else if (S_ISREG(mode) || S_ISLNK(mode)) { method = ZIP_METHOD_STORE; attr2 = S_ISLNK(mode) ? ((mode | 0777) << 16) : (mode & 0111) ? 
((mode) << 16) : 0; if (S_ISLNK(mode) || (mode & 0111)) creator_version = 0x0317; if (S_ISREG(mode) && args->compression_level != 0 && size > 0) method = ZIP_METHOD_DEFLATE; if (!buffer) { enum object_type type; stream = open_istream(args->repo, oid, &type, &size, NULL); if (!stream) return error(_("cannot stream blob %s"), oid_to_hex(oid)); flags |= ZIP_STREAM; out = NULL; } else { crc = crc32(crc, buffer, size); is_binary = entry_is_binary(args->repo->index, path_without_prefix, buffer, size); out = buffer; } compressed_size = (method == ZIP_METHOD_STORE) ? size : 0; } else { return error(_("unsupported file mode: 0%o (SHA1: %s)"), mode, oid_to_hex(oid)); } if (creator_version > max_creator_version) max_creator_version = creator_version; if (buffer && method == ZIP_METHOD_DEFLATE) { out = deflated = zlib_deflate_raw(buffer, size, args->compression_level, &compressed_size); if (!out || compressed_size >= size) { out = buffer; method = ZIP_METHOD_STORE; compressed_size = size; } } copy_le16(extra.magic, 0x5455); copy_le16(extra.extra_size, ZIP_EXTRA_MTIME_PAYLOAD_SIZE); extra.flags[0] = 1; /* just mtime */ copy_le32(extra.mtime, args->time); if (size > 0xffffffff || compressed_size > 0xffffffff) need_zip64_extra = 1; if (stream && size > 0x7fffffff) need_zip64_extra = 1; if (need_zip64_extra) version_needed = 45; copy_le32(header.magic, 0x04034b50); copy_le16(header.version, version_needed); copy_le16(header.flags, flags); copy_le16(header.compression_method, method); copy_le16(header.mtime, zip_time); copy_le16(header.mdate, zip_date); if (need_zip64_extra) { set_zip_header_data_desc(&header, 0xffffffff, 0xffffffff, crc); header_extra_size += ZIP64_EXTRA_SIZE; } else { set_zip_header_data_desc(&header, size, compressed_size, crc); } copy_le16(header.filename_length, pathlen); copy_le16(header.extra_length, header_extra_size); write_or_die(1, &header, ZIP_LOCAL_HEADER_SIZE); zip_offset += ZIP_LOCAL_HEADER_SIZE; write_or_die(1, path, pathlen); zip_offset += pathlen; 
write_or_die(1, &extra, ZIP_EXTRA_MTIME_SIZE); zip_offset += ZIP_EXTRA_MTIME_SIZE; if (need_zip64_extra) { copy_le16(extra64.magic, 0x0001); copy_le16(extra64.extra_size, ZIP64_EXTRA_PAYLOAD_SIZE); copy_le64(extra64.size, size); copy_le64(extra64.compressed_size, compressed_size); write_or_die(1, &extra64, ZIP64_EXTRA_SIZE); zip_offset += ZIP64_EXTRA_SIZE; } if (stream && method == ZIP_METHOD_STORE) { unsigned char buf[STREAM_BUFFER_SIZE]; ssize_t readlen; for (;;) { readlen = read_istream(stream, buf, sizeof(buf)); if (readlen <= 0) break; crc = crc32(crc, buf, readlen); if (is_binary == -1) is_binary = entry_is_binary(args->repo->index, path_without_prefix, buf, readlen); write_or_die(1, buf, readlen); } close_istream(stream); if (readlen) return readlen; compressed_size = size; zip_offset += compressed_size; write_zip_data_desc(size, compressed_size, crc); } else if (stream && method == ZIP_METHOD_DEFLATE) { unsigned char buf[STREAM_BUFFER_SIZE]; ssize_t readlen; git_zstream zstream; int result; size_t out_len; unsigned char compressed[STREAM_BUFFER_SIZE * 2]; git_deflate_init_raw(&zstream, args->compression_level); compressed_size = 0; zstream.next_out = compressed; zstream.avail_out = sizeof(compressed); for (;;) { readlen = read_istream(stream, buf, sizeof(buf)); if (readlen <= 0) break; crc = crc32(crc, buf, readlen); if (is_binary == -1) is_binary = entry_is_binary(args->repo->index, path_without_prefix, buf, readlen); zstream.next_in = buf; zstream.avail_in = readlen; result = git_deflate(&zstream, 0); if (result != Z_OK) die(_("deflate error (%d)"), result); out_len = zstream.next_out - compressed; if (out_len > 0) { write_or_die(1, compressed, out_len); compressed_size += out_len; zstream.next_out = compressed; zstream.avail_out = sizeof(compressed); } } close_istream(stream); if (readlen) return readlen; zstream.next_in = buf; zstream.avail_in = 0; result = git_deflate(&zstream, Z_FINISH); if (result != Z_STREAM_END) die("deflate error (%d)", result); 
git_deflate_end(&zstream); out_len = zstream.next_out - compressed; write_or_die(1, compressed, out_len); compressed_size += out_len; zip_offset += compressed_size; write_zip_data_desc(size, compressed_size, crc); } else if (compressed_size > 0) { write_or_die(1, out, compressed_size); zip_offset += compressed_size; } free(deflated); if (compressed_size > 0xffffffff || size > 0xffffffff || offset > 0xffffffff) { if (compressed_size >= 0xffffffff) zip64_dir_extra_payload_size += 8; if (size >= 0xffffffff) zip64_dir_extra_payload_size += 8; if (offset >= 0xffffffff) zip64_dir_extra_payload_size += 8; zip_dir_extra_size += 2 + 2 + zip64_dir_extra_payload_size; } strbuf_add_le(&zip_dir, 4, 0x02014b50); /* magic */ strbuf_add_le(&zip_dir, 2, creator_version); strbuf_add_le(&zip_dir, 2, version_needed); strbuf_add_le(&zip_dir, 2, flags); strbuf_add_le(&zip_dir, 2, method); strbuf_add_le(&zip_dir, 2, zip_time); strbuf_add_le(&zip_dir, 2, zip_date); strbuf_add_le(&zip_dir, 4, crc); strbuf_add_le(&zip_dir, 4, clamp32(compressed_size)); strbuf_add_le(&zip_dir, 4, clamp32(size)); strbuf_add_le(&zip_dir, 2, pathlen); strbuf_add_le(&zip_dir, 2, zip_dir_extra_size); strbuf_add_le(&zip_dir, 2, 0); /* comment length */ strbuf_add_le(&zip_dir, 2, 0); /* disk */ strbuf_add_le(&zip_dir, 2, !is_binary); strbuf_add_le(&zip_dir, 4, attr2); strbuf_add_le(&zip_dir, 4, clamp32(offset)); strbuf_add(&zip_dir, path, pathlen); strbuf_add(&zip_dir, &extra, ZIP_EXTRA_MTIME_SIZE); if (zip64_dir_extra_payload_size) { strbuf_add_le(&zip_dir, 2, 0x0001); /* magic */ strbuf_add_le(&zip_dir, 2, zip64_dir_extra_payload_size); if (size >= 0xffffffff) strbuf_add_le(&zip_dir, 8, size); if (compressed_size >= 0xffffffff) strbuf_add_le(&zip_dir, 8, compressed_size); if (offset >= 0xffffffff) strbuf_add_le(&zip_dir, 8, offset); } zip_dir_entries++; return 0; } static void write_zip64_trailer(void) { struct zip64_dir_trailer trailer64; struct zip64_dir_trailer_locator locator64; copy_le32(trailer64.magic, 
0x06064b50); copy_le64(trailer64.record_size, ZIP64_DIR_TRAILER_RECORD_SIZE); copy_le16(trailer64.creator_version, max_creator_version); copy_le16(trailer64.version, 45); copy_le32(trailer64.disk, 0); copy_le32(trailer64.directory_start_disk, 0); copy_le64(trailer64.entries_on_this_disk, zip_dir_entries); copy_le64(trailer64.entries, zip_dir_entries); copy_le64(trailer64.size, zip_dir.len); copy_le64(trailer64.offset, zip_offset); copy_le32(locator64.magic, 0x07064b50); copy_le32(locator64.disk, 0); copy_le64(locator64.offset, zip_offset + zip_dir.len); copy_le32(locator64.number_of_disks, 1); write_or_die(1, &trailer64, ZIP64_DIR_TRAILER_SIZE); write_or_die(1, &locator64, ZIP64_DIR_TRAILER_LOCATOR_SIZE); } static void write_zip_trailer(const struct object_id *oid) { struct zip_dir_trailer trailer; int clamped = 0; copy_le32(trailer.magic, 0x06054b50); copy_le16(trailer.disk, 0); copy_le16(trailer.directory_start_disk, 0); copy_le16_clamp(trailer.entries_on_this_disk, zip_dir_entries, &clamped); copy_le16_clamp(trailer.entries, zip_dir_entries, &clamped); copy_le32(trailer.size, zip_dir.len); copy_le32_clamp(trailer.offset, zip_offset, &clamped); copy_le16(trailer.comment_length, oid ? 
the_hash_algo->hexsz : 0); write_or_die(1, zip_dir.buf, zip_dir.len); if (clamped) write_zip64_trailer(); write_or_die(1, &trailer, ZIP_DIR_TRAILER_SIZE); if (oid) write_or_die(1, oid_to_hex(oid), the_hash_algo->hexsz); } static void dos_time(timestamp_t *timestamp, int *dos_date, int *dos_time) { time_t time; struct tm tm; if (date_overflows(*timestamp)) die(_("timestamp too large for this system: %"PRItime), *timestamp); time = (time_t)*timestamp; localtime_r(&time, &tm); *timestamp = time; *dos_date = tm.tm_mday + (tm.tm_mon + 1) * 32 + (tm.tm_year + 1900 - 1980) * 512; *dos_time = tm.tm_sec / 2 + tm.tm_min * 32 + tm.tm_hour * 2048; } static int archive_zip_config(const char *var, const char *value, const struct config_context *ctx UNUSED, void *data UNUSED) { return userdiff_config(var, value); } static int write_zip_archive(const struct archiver *ar UNUSED, struct archiver_args *args) { int err; git_config(archive_zip_config, NULL); dos_time(&args->time, &zip_date, &zip_time); strbuf_init(&zip_dir, 0); err = write_archive_entries(args, write_zip_entry); if (!err) write_zip_trailer(args->commit_oid); strbuf_release(&zip_dir); return err; } static struct archiver zip_archiver = { .name = "zip", .write_archive = write_zip_archive, .flags = ARCHIVER_WANT_COMPRESSION_LEVELS|ARCHIVER_REMOTE, }; void init_zip_archiver(void) { register_archiver(&zip_archiver); } git-cinnabar-0.7.0/git-core/archive.c000064400000000000000000000524231046102023000154720ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "abspath.h" #include "config.h" #include "convert.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "object-name.h" #include "path.h" #include "pretty.h" #include "setup.h" #include "refs.h" #include "object-store-ll.h" #include "commit.h" #include "tree.h" #include "tree-walk.h" #include "attr.h" #include "archive.h" #include "parse-options.h" #include "unpack-trees.h" 
#include "quote.h" static char const * const archive_usage[] = { N_("git archive [] [...]"), "git archive --list", N_("git archive --remote [--exec ] [] [...]"), N_("git archive --remote [--exec ] --list"), NULL }; static const struct archiver **archivers; static int nr_archivers; static int alloc_archivers; static int remote_allow_unreachable; void register_archiver(struct archiver *ar) { ALLOC_GROW(archivers, nr_archivers + 1, alloc_archivers); archivers[nr_archivers++] = ar; } void init_archivers(void) { init_tar_archiver(); init_zip_archiver(); } static void format_subst(const struct commit *commit, const char *src, size_t len, struct strbuf *buf, struct pretty_print_context *ctx) { char *to_free = NULL; struct strbuf fmt = STRBUF_INIT; if (src == buf->buf) to_free = strbuf_detach(buf, NULL); for (;;) { const char *b, *c; b = memmem(src, len, "$Format:", 8); if (!b) break; c = memchr(b + 8, '$', (src + len) - b - 8); if (!c) break; strbuf_reset(&fmt); strbuf_add(&fmt, b + 8, c - b - 8); strbuf_add(buf, src, b - src); repo_format_commit_message(the_repository, commit, fmt.buf, buf, ctx); len -= c + 1 - src; src = c + 1; } strbuf_add(buf, src, len); strbuf_release(&fmt); free(to_free); } static void *object_file_to_archive(const struct archiver_args *args, const char *path, const struct object_id *oid, unsigned int mode, enum object_type *type, unsigned long *sizep) { void *buffer; const struct commit *commit = args->convert ? args->commit : NULL; struct checkout_metadata meta; init_checkout_metadata(&meta, args->refname, args->commit_oid ? args->commit_oid : (args->tree ? 
&args->tree->object.oid : NULL), oid); path += args->baselen; buffer = repo_read_object_file(the_repository, oid, type, sizep); if (buffer && S_ISREG(mode)) { struct strbuf buf = STRBUF_INIT; size_t size = 0; strbuf_attach(&buf, buffer, *sizep, *sizep + 1); convert_to_working_tree(args->repo->index, path, buf.buf, buf.len, &buf, &meta); if (commit) format_subst(commit, buf.buf, buf.len, &buf, args->pretty_ctx); buffer = strbuf_detach(&buf, &size); *sizep = size; } return buffer; } struct directory { struct directory *up; struct object_id oid; int baselen, len; unsigned mode; char path[FLEX_ARRAY]; }; struct archiver_context { struct archiver_args *args; write_archive_entry_fn_t write_entry; struct directory *bottom; }; static const struct attr_check *get_archive_attrs(struct index_state *istate, const char *path) { static struct attr_check *check; if (!check) check = attr_check_initl("export-ignore", "export-subst", NULL); git_check_attr(istate, path, check); return check; } static int check_attr_export_ignore(const struct attr_check *check) { return check && ATTR_TRUE(check->items[0].value); } static int check_attr_export_subst(const struct attr_check *check) { return check && ATTR_TRUE(check->items[1].value); } static int write_archive_entry(const struct object_id *oid, const char *base, int baselen, const char *filename, unsigned mode, void *context) { static struct strbuf path = STRBUF_INIT; struct archiver_context *c = context; struct archiver_args *args = c->args; write_archive_entry_fn_t write_entry = c->write_entry; int err; const char *path_without_prefix; unsigned long size; void *buffer; enum object_type type; args->convert = 0; strbuf_reset(&path); strbuf_grow(&path, PATH_MAX); strbuf_add(&path, args->base, args->baselen); strbuf_add(&path, base, baselen); strbuf_addstr(&path, filename); if (S_ISDIR(mode) || S_ISGITLINK(mode)) strbuf_addch(&path, '/'); path_without_prefix = path.buf + args->baselen; if (!S_ISDIR(mode)) { const struct attr_check *check; 
check = get_archive_attrs(args->repo->index, path_without_prefix); if (check_attr_export_ignore(check)) return 0; args->convert = check_attr_export_subst(check); } if (args->prefix) { static struct strbuf new_path = STRBUF_INIT; static struct strbuf buf = STRBUF_INIT; const char *rel; rel = relative_path(path_without_prefix, args->prefix, &buf); /* * We don't add an entry for the current working * directory when we are at the root; skip it also when * we're in a subdirectory or submodule. Skip entries * higher up as well. */ if (!strcmp(rel, "./") || starts_with(rel, "../")) return S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0; /* rel can refer to path, so don't edit it in place */ strbuf_reset(&new_path); strbuf_add(&new_path, args->base, args->baselen); strbuf_addstr(&new_path, rel); strbuf_swap(&path, &new_path); } if (args->verbose) fprintf(stderr, "%.*s\n", (int)path.len, path.buf); if (S_ISDIR(mode) || S_ISGITLINK(mode)) { err = write_entry(args, oid, path.buf, path.len, mode, NULL, 0); if (err) return err; return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0); } /* Stream it? 
*/ if (S_ISREG(mode) && !args->convert && oid_object_info(args->repo, oid, &size) == OBJ_BLOB && size > big_file_threshold) return write_entry(args, oid, path.buf, path.len, mode, NULL, size); buffer = object_file_to_archive(args, path.buf, oid, mode, &type, &size); if (!buffer) return error(_("cannot read '%s'"), oid_to_hex(oid)); err = write_entry(args, oid, path.buf, path.len, mode, buffer, size); free(buffer); return err; } static void queue_directory(const struct object_id *oid, struct strbuf *base, const char *filename, unsigned mode, struct archiver_context *c) { struct directory *d; size_t len = st_add4(base->len, 1, strlen(filename), 1); d = xmalloc(st_add(sizeof(*d), len)); d->up = c->bottom; d->baselen = base->len; d->mode = mode; c->bottom = d; d->len = xsnprintf(d->path, len, "%.*s%s/", (int)base->len, base->buf, filename); oidcpy(&d->oid, oid); } static int write_directory(struct archiver_context *c) { struct directory *d = c->bottom; int ret; if (!d) return 0; c->bottom = d->up; d->path[d->len - 1] = '\0'; /* no trailing slash */ ret = write_directory(c) || write_archive_entry(&d->oid, d->path, d->baselen, d->path + d->baselen, d->mode, c) != READ_TREE_RECURSIVE; free(d); return ret ? -1 : 0; } static int queue_or_write_archive_entry(const struct object_id *oid, struct strbuf *base, const char *filename, unsigned mode, void *context) { struct archiver_context *c = context; while (c->bottom && !(base->len >= c->bottom->len && !strncmp(base->buf, c->bottom->path, c->bottom->len))) { struct directory *next = c->bottom->up; free(c->bottom); c->bottom = next; } if (S_ISDIR(mode)) { size_t baselen = base->len; const struct attr_check *check; /* Borrow base, but restore its original value when done. 
*/ strbuf_addstr(base, filename); strbuf_addch(base, '/'); check = get_archive_attrs(c->args->repo->index, base->buf); strbuf_setlen(base, baselen); if (check_attr_export_ignore(check)) return 0; queue_directory(oid, base, filename, mode, c); return READ_TREE_RECURSIVE; } if (write_directory(c)) return -1; return write_archive_entry(oid, base->buf, base->len, filename, mode, context); } struct extra_file_info { char *base; struct stat stat; void *content; }; int write_archive_entries(struct archiver_args *args, write_archive_entry_fn_t write_entry) { struct archiver_context context; int err; struct strbuf path_in_archive = STRBUF_INIT; struct strbuf content = STRBUF_INIT; struct object_id fake_oid; int i; oidcpy(&fake_oid, null_oid()); if (args->baselen > 0 && args->base[args->baselen - 1] == '/') { size_t len = args->baselen; while (len > 1 && args->base[len - 2] == '/') len--; if (args->verbose) fprintf(stderr, "%.*s\n", (int)len, args->base); err = write_entry(args, &args->tree->object.oid, args->base, len, 040777, NULL, 0); if (err) return err; } memset(&context, 0, sizeof(context)); context.args = args; context.write_entry = write_entry; err = read_tree(args->repo, args->tree, &args->pathspec, queue_or_write_archive_entry, &context); if (err == READ_TREE_RECURSIVE) err = 0; while (context.bottom) { struct directory *next = context.bottom->up; free(context.bottom); context.bottom = next; } for (i = 0; i < args->extra_files.nr; i++) { struct string_list_item *item = args->extra_files.items + i; char *path = item->string; struct extra_file_info *info = item->util; put_be64(fake_oid.hash, i + 1); if (!info->content) { strbuf_reset(&path_in_archive); if (info->base) strbuf_addstr(&path_in_archive, info->base); strbuf_addstr(&path_in_archive, basename(path)); strbuf_reset(&content); if (strbuf_read_file(&content, path, info->stat.st_size) < 0) err = error_errno(_("cannot read '%s'"), path); else err = write_entry(args, &fake_oid, path_in_archive.buf, 
path_in_archive.len, canon_mode(info->stat.st_mode), content.buf, content.len); } else { err = write_entry(args, &fake_oid, path, strlen(path), canon_mode(info->stat.st_mode), info->content, info->stat.st_size); } if (err) break; } strbuf_release(&path_in_archive); strbuf_release(&content); return err; } static const struct archiver *lookup_archiver(const char *name) { int i; if (!name) return NULL; for (i = 0; i < nr_archivers; i++) { if (!strcmp(name, archivers[i]->name)) return archivers[i]; } return NULL; } struct path_exists_context { struct pathspec pathspec; struct archiver_args *args; }; static int reject_entry(const struct object_id *oid UNUSED, struct strbuf *base, const char *filename, unsigned mode, void *context) { int ret = -1; struct path_exists_context *ctx = context; if (S_ISDIR(mode)) { struct strbuf sb = STRBUF_INIT; strbuf_addbuf(&sb, base); strbuf_addstr(&sb, filename); if (!match_pathspec(ctx->args->repo->index, &ctx->pathspec, sb.buf, sb.len, 0, NULL, 1)) ret = READ_TREE_RECURSIVE; strbuf_release(&sb); } return ret; } static int reject_outside(const struct object_id *oid UNUSED, struct strbuf *base, const char *filename, unsigned mode, void *context) { struct archiver_args *args = context; struct strbuf buf = STRBUF_INIT; struct strbuf path = STRBUF_INIT; int ret = 0; if (S_ISDIR(mode)) return READ_TREE_RECURSIVE; strbuf_addbuf(&path, base); strbuf_addstr(&path, filename); if (starts_with(relative_path(path.buf, args->prefix, &buf), "../")) ret = -1; strbuf_release(&buf); strbuf_release(&path); return ret; } static int path_exists(struct archiver_args *args, const char *path) { const char *paths[] = { path, NULL }; struct path_exists_context ctx; int ret; ctx.args = args; parse_pathspec(&ctx.pathspec, 0, PATHSPEC_PREFER_CWD, args->prefix, paths); ctx.pathspec.recursive = 1; if (args->prefix && read_tree(args->repo, args->tree, &ctx.pathspec, reject_outside, args)) die(_("pathspec '%s' matches files outside the " "current directory"), path); 
ret = read_tree(args->repo, args->tree, &ctx.pathspec, reject_entry, &ctx); clear_pathspec(&ctx.pathspec); return ret != 0; } static void parse_pathspec_arg(const char **pathspec, struct archiver_args *ar_args) { /* * must be consistent with parse_pathspec in path_exists() * Also if pathspec patterns are dependent, we're in big * trouble as we test each one separately */ parse_pathspec(&ar_args->pathspec, 0, PATHSPEC_PREFER_CWD, ar_args->prefix, pathspec); ar_args->pathspec.recursive = 1; if (pathspec) { while (*pathspec) { if (**pathspec && !path_exists(ar_args, *pathspec)) die(_("pathspec '%s' did not match any files"), *pathspec); pathspec++; } } } static void parse_treeish_arg(const char **argv, struct archiver_args *ar_args, int remote) { const char *name = argv[0]; const struct object_id *commit_oid; time_t archive_time; struct tree *tree; const struct commit *commit; struct object_id oid; char *ref = NULL; /* Remotes are only allowed to fetch actual refs */ if (remote && !remote_allow_unreachable) { const char *colon = strchrnul(name, ':'); int refnamelen = colon - name; if (!repo_dwim_ref(the_repository, name, refnamelen, &oid, &ref, 0)) die(_("no such ref: %.*s"), refnamelen, name); } else { repo_dwim_ref(the_repository, name, strlen(name), &oid, &ref, 0); } if (repo_get_oid(the_repository, name, &oid)) die(_("not a valid object name: %s"), name); commit = lookup_commit_reference_gently(ar_args->repo, &oid, 1); if (commit) { commit_oid = &commit->object.oid; archive_time = commit->date; } else { commit_oid = NULL; archive_time = time(NULL); } if (ar_args->mtime_option) archive_time = approxidate(ar_args->mtime_option); tree = parse_tree_indirect(&oid); if (!tree) die(_("not a tree object: %s"), oid_to_hex(&oid)); /* * Setup index and instruct attr to read index only */ if (!ar_args->worktree_attributes) { struct unpack_trees_options opts; struct tree_desc t; memset(&opts, 0, sizeof(opts)); opts.index_only = 1; opts.head_idx = -1; opts.src_index = 
ar_args->repo->index; opts.dst_index = ar_args->repo->index; opts.fn = oneway_merge; init_tree_desc(&t, &tree->object.oid, tree->buffer, tree->size); if (unpack_trees(1, &t, &opts)) die(_("failed to unpack tree object %s"), oid_to_hex(&tree->object.oid)); git_attr_set_direction(GIT_ATTR_INDEX); } ar_args->refname = ref; ar_args->tree = tree; ar_args->commit_oid = commit_oid; ar_args->commit = commit; ar_args->time = archive_time; } static void extra_file_info_clear(void *util, const char *str UNUSED) { struct extra_file_info *info = util; free(info->base); free(info->content); free(info); } static int add_file_cb(const struct option *opt, const char *arg, int unset) { struct archiver_args *args = opt->value; const char **basep = (const char **)opt->defval; const char *base = *basep; char *path; struct string_list_item *item; struct extra_file_info *info; if (unset) { string_list_clear_func(&args->extra_files, extra_file_info_clear); return 0; } if (!arg) return -1; info = xmalloc(sizeof(*info)); info->base = xstrdup_or_null(base); if (!strcmp(opt->long_name, "add-file")) { path = prefix_filename(args->prefix, arg); if (stat(path, &info->stat)) die(_("File not found: %s"), path); if (!S_ISREG(info->stat.st_mode)) die(_("Not a regular file: %s"), path); info->content = NULL; /* read the file later */ } else if (!strcmp(opt->long_name, "add-virtual-file")) { struct strbuf buf = STRBUF_INIT; const char *p = arg; if (*p != '"') p = strchr(p, ':'); else if (unquote_c_style(&buf, p, &p) < 0) die(_("unclosed quote: '%s'"), arg); if (!p || *p != ':') die(_("missing colon: '%s'"), arg); if (p == arg) die(_("empty file name: '%s'"), arg); path = buf.len ? 
strbuf_detach(&buf, NULL) : xstrndup(arg, p - arg); if (args->prefix) { char *save = path; path = prefix_filename(args->prefix, path); free(save); } memset(&info->stat, 0, sizeof(info->stat)); info->stat.st_mode = S_IFREG | 0644; info->content = xstrdup(p + 1); info->stat.st_size = strlen(info->content); } else { BUG("add_file_cb() called for %s", opt->long_name); } item = string_list_append_nodup(&args->extra_files, path); item->util = info; return 0; } static int number_callback(const struct option *opt, const char *arg, int unset) { BUG_ON_OPT_NEG(unset); *(int *)opt->value = strtol(arg, NULL, 10); return 0; } static int parse_archive_args(int argc, const char **argv, const struct archiver **ar, struct archiver_args *args, const char *name_hint, int is_remote) { const char *format = NULL; const char *base = NULL; const char *remote = NULL; const char *exec = NULL; const char *output = NULL; const char *mtime_option = NULL; int compression_level = -1; int verbose = 0; int i; int list = 0; int worktree_attributes = 0; struct option opts[] = { OPT_GROUP(""), OPT_STRING(0, "format", &format, N_("fmt"), N_("archive format")), OPT_STRING(0, "prefix", &base, N_("prefix"), N_("prepend prefix to each pathname in the archive")), { OPTION_CALLBACK, 0, "add-file", args, N_("file"), N_("add untracked file to archive"), 0, add_file_cb, (intptr_t)&base }, { OPTION_CALLBACK, 0, "add-virtual-file", args, N_("path:content"), N_("add untracked file to archive"), 0, add_file_cb, (intptr_t)&base }, OPT_STRING('o', "output", &output, N_("file"), N_("write the archive to this file")), OPT_BOOL(0, "worktree-attributes", &worktree_attributes, N_("read .gitattributes in working directory")), OPT__VERBOSE(&verbose, N_("report archived files on stderr")), { OPTION_STRING, 0, "mtime", &mtime_option, N_("time"), N_("set modification time of archive entries"), PARSE_OPT_NONEG }, OPT_NUMBER_CALLBACK(&compression_level, N_("set compression level"), number_callback), OPT_GROUP(""), OPT_BOOL('l', 
"list", &list, N_("list supported archive formats")), OPT_GROUP(""), OPT_STRING(0, "remote", &remote, N_("repo"), N_("retrieve the archive from remote repository ")), OPT_STRING(0, "exec", &exec, N_("command"), N_("path to the remote git-upload-archive command")), OPT_END() }; argc = parse_options(argc, argv, NULL, opts, archive_usage, 0); if (remote) die(_("Unexpected option --remote")); if (exec) die(_("the option '%s' requires '%s'"), "--exec", "--remote"); if (output) die(_("Unexpected option --output")); if (is_remote && args->extra_files.nr) die(_("options '%s' and '%s' cannot be used together"), "--add-file", "--remote"); if (!base) base = ""; if (list) { if (argc) die(_("extra command line parameter '%s'"), *argv); for (i = 0; i < nr_archivers; i++) if (!is_remote || archivers[i]->flags & ARCHIVER_REMOTE) printf("%s\n", archivers[i]->name); exit(0); } if (!format && name_hint) format = archive_format_from_filename(name_hint); if (!format) format = "tar"; /* We need at least one parameter -- tree-ish */ if (argc < 1) usage_with_options(archive_usage, opts); *ar = lookup_archiver(format); if (!*ar || (is_remote && !((*ar)->flags & ARCHIVER_REMOTE))) die(_("Unknown archive format '%s'"), format); args->compression_level = Z_DEFAULT_COMPRESSION; if (compression_level != -1) { int levels_ok = (*ar)->flags & ARCHIVER_WANT_COMPRESSION_LEVELS; int high_ok = (*ar)->flags & ARCHIVER_HIGH_COMPRESSION_LEVELS; if (levels_ok && (compression_level <= 9 || high_ok)) args->compression_level = compression_level; else { die(_("Argument not supported for format '%s': -%d"), format, compression_level); } } args->verbose = verbose; args->base = base; args->baselen = strlen(base); args->worktree_attributes = worktree_attributes; args->mtime_option = mtime_option; return argc; } int write_archive(int argc, const char **argv, const char *prefix, struct repository *repo, const char *name_hint, int remote) { const struct archiver *ar = NULL; struct pretty_print_describe_status 
describe_status = {0}; struct pretty_print_context ctx = {0}; struct archiver_args args; const char **argv_copy; int rc; git_config_get_bool("uploadarchive.allowunreachable", &remote_allow_unreachable); git_config(git_default_config, NULL); describe_status.max_invocations = 1; ctx.date_mode.type = DATE_NORMAL; ctx.abbrev = DEFAULT_ABBREV; ctx.describe_status = &describe_status; args.pretty_ctx = &ctx; args.repo = repo; args.prefix = prefix; string_list_init_dup(&args.extra_files); /* * `parse_archive_args()` modifies contents of `argv`, which is what we * want. Our callers may not want it though, so we create a copy here. */ DUP_ARRAY(argv_copy, argv, argc); argv = argv_copy; argc = parse_archive_args(argc, argv, &ar, &args, name_hint, remote); if (!startup_info->have_repository) { /* * We know this will die() with an error, so we could just * die ourselves; but its error message will be more specific * than what we could write here. */ setup_git_directory(); } parse_treeish_arg(argv, &args, remote); parse_pathspec_arg(argv + 1, &args); rc = ar->write_archive(ar, &args); string_list_clear_func(&args.extra_files, extra_file_info_clear); free(args.refname); clear_pathspec(&args.pathspec); free(argv_copy); return rc; } static int match_extension(const char *filename, const char *ext) { int prefixlen = strlen(filename) - strlen(ext); /* * We need 1 character for the '.', and 1 character to ensure that the * prefix is non-empty (k.e., we don't match .tar.gz with no actual * filename). 
*/ if (prefixlen < 2 || filename[prefixlen - 1] != '.') return 0; return !strcmp(filename + prefixlen, ext); } const char *archive_format_from_filename(const char *filename) { int i; for (i = 0; i < nr_archivers; i++) if (match_extension(filename, archivers[i]->name)) return archivers[i]->name; return NULL; } git-cinnabar-0.7.0/git-core/archive.h000064400000000000000000000030521046102023000154710ustar 00000000000000#ifndef ARCHIVE_H #define ARCHIVE_H #include "pathspec.h" #include "string-list.h" struct repository; struct pretty_print_context; struct archiver_args { struct repository *repo; char *refname; const char *prefix; const char *base; size_t baselen; struct tree *tree; const struct object_id *commit_oid; const struct commit *commit; const char *mtime_option; timestamp_t time; struct pathspec pathspec; unsigned int verbose : 1; unsigned int worktree_attributes : 1; unsigned int convert : 1; int compression_level; struct string_list extra_files; struct pretty_print_context *pretty_ctx; }; /* main api */ int write_archive(int argc, const char **argv, const char *prefix, struct repository *repo, const char *name_hint, int remote); const char *archive_format_from_filename(const char *filename); /* archive backend stuff */ #define ARCHIVER_WANT_COMPRESSION_LEVELS 1 #define ARCHIVER_REMOTE 2 #define ARCHIVER_HIGH_COMPRESSION_LEVELS 4 struct archiver { const char *name; int (*write_archive)(const struct archiver *, struct archiver_args *); unsigned flags; char *filter_command; }; void register_archiver(struct archiver *); void init_tar_archiver(void); void init_zip_archiver(void); void init_archivers(void); typedef int (*write_archive_entry_fn_t)(struct archiver_args *args, const struct object_id *oid, const char *path, size_t pathlen, unsigned int mode, void *buffer, unsigned long size); int write_archive_entries(struct archiver_args *args, write_archive_entry_fn_t write_entry); #endif /* ARCHIVE_H */ 
git-cinnabar-0.7.0/git-core/attr.c000064400000000000000000001034571046102023000150270ustar 00000000000000/* * Handle git attributes. See gitattributes(5) for a description of * the file syntax, and attr.h for a description of the API. * * One basic design decision here is that we are not going to support * an insanely large number of attributes. */ #define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "config.h" #include "environment.h" #include "exec-cmd.h" #include "attr.h" #include "dir.h" #include "gettext.h" #include "path.h" #include "utf8.h" #include "quote.h" #include "read-cache-ll.h" #include "refs.h" #include "revision.h" #include "object-store-ll.h" #include "setup.h" #include "thread-utils.h" #include "tree-walk.h" #include "object-name.h" char *git_attr_tree; const char git_attr__true[] = "(builtin)true"; const char git_attr__false[] = "\0(builtin)false"; static const char git_attr__unknown[] = "(builtin)unknown"; #define ATTR__TRUE git_attr__true #define ATTR__FALSE git_attr__false #define ATTR__UNSET NULL #define ATTR__UNKNOWN git_attr__unknown struct git_attr { unsigned int attr_nr; /* unique attribute number */ char name[FLEX_ARRAY]; /* attribute name */ }; const char *git_attr_name(const struct git_attr *attr) { return attr->name; } struct attr_hashmap { struct hashmap map; pthread_mutex_t mutex; }; static inline void hashmap_lock(struct attr_hashmap *map) { pthread_mutex_lock(&map->mutex); } static inline void hashmap_unlock(struct attr_hashmap *map) { pthread_mutex_unlock(&map->mutex); } /* The container for objects stored in "struct attr_hashmap" */ struct attr_hash_entry { struct hashmap_entry ent; const char *key; /* the key; memory should be owned by value */ size_t keylen; /* length of the key */ void *value; /* the stored value */ }; /* attr_hashmap comparison function */ static int attr_hash_entry_cmp(const void *cmp_data UNUSED, const struct hashmap_entry *eptr, const struct 
hashmap_entry *entry_or_key, const void *keydata UNUSED) { const struct attr_hash_entry *a, *b; a = container_of(eptr, const struct attr_hash_entry, ent); b = container_of(entry_or_key, const struct attr_hash_entry, ent); return (a->keylen != b->keylen) || strncmp(a->key, b->key, a->keylen); } /* * The global dictionary of all interned attributes. This * is a singleton object which is shared between threads. * Access to this dictionary must be surrounded with a mutex. */ static struct attr_hashmap g_attr_hashmap = { .map = HASHMAP_INIT(attr_hash_entry_cmp, NULL), }; /* * Retrieve the 'value' stored in a hashmap given the provided 'key'. * If there is no matching entry, return NULL. */ static void *attr_hashmap_get(struct attr_hashmap *map, const char *key, size_t keylen) { struct attr_hash_entry k; struct attr_hash_entry *e; hashmap_entry_init(&k.ent, memhash(key, keylen)); k.key = key; k.keylen = keylen; e = hashmap_get_entry(&map->map, &k, ent, NULL); return e ? e->value : NULL; } /* Add 'value' to a hashmap based on the provided 'key'. */ static void attr_hashmap_add(struct attr_hashmap *map, const char *key, size_t keylen, void *value) { struct attr_hash_entry *e; e = xmalloc(sizeof(struct attr_hash_entry)); hashmap_entry_init(&e->ent, memhash(key, keylen)); e->key = key; e->keylen = keylen; e->value = value; hashmap_add(&map->map, &e->ent); } struct all_attrs_item { const struct git_attr *attr; const char *value; /* * If 'macro' is non-NULL, indicates that 'attr' is a macro based on * the current attribute stack and contains a pointer to the match_attr * definition of the macro */ const struct match_attr *macro; }; /* * Reallocate and reinitialize the array of all attributes (which is used in * the attribute collection process) in 'check' based on the global dictionary * of attributes. 
*/ static void all_attrs_init(struct attr_hashmap *map, struct attr_check *check) { int i; unsigned int size; hashmap_lock(map); size = hashmap_get_size(&map->map); if (size < check->all_attrs_nr) BUG("interned attributes shouldn't be deleted"); /* * If the number of attributes in the global dictionary has increased * (or this attr_check instance doesn't have an initialized all_attrs * field), reallocate the provided attr_check instance's all_attrs * field and fill each entry with its corresponding git_attr. */ if (size != check->all_attrs_nr) { struct attr_hash_entry *e; struct hashmap_iter iter; REALLOC_ARRAY(check->all_attrs, size); check->all_attrs_nr = size; hashmap_for_each_entry(&map->map, &iter, e, ent /* member name */) { const struct git_attr *a = e->value; check->all_attrs[a->attr_nr].attr = a; } } hashmap_unlock(map); /* * Re-initialize every entry in check->all_attrs. * This re-initialization can live outside of the locked region since * the attribute dictionary is no longer being accessed. */ for (i = 0; i < check->all_attrs_nr; i++) { check->all_attrs[i].value = ATTR__UNKNOWN; check->all_attrs[i].macro = NULL; } } /* * Attribute name cannot begin with "builtin_" which * is a reserved namespace for built in attributes values. */ static int attr_name_reserved(const char *name) { return starts_with(name, "builtin_"); } static int attr_name_valid(const char *name, size_t namelen) { /* * Attribute name cannot begin with '-' and must consist of * characters from [-A-Za-z0-9_.]. */ if (namelen <= 0 || *name == '-') return 0; while (namelen--) { char ch = *name++; if (! (ch == '-' || ch == '.' 
|| ch == '_' || ('0' <= ch && ch <= '9') || ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z')) ) return 0; } return 1; } static void report_invalid_attr(const char *name, size_t len, const char *src, int lineno) { struct strbuf err = STRBUF_INIT; strbuf_addf(&err, _("%.*s is not a valid attribute name"), (int) len, name); fprintf(stderr, "%s: %s:%d\n", err.buf, src, lineno); strbuf_release(&err); } /* * Given a 'name', lookup and return the corresponding attribute in the global * dictionary. If no entry is found, create a new attribute and store it in * the dictionary. */ static const struct git_attr *git_attr_internal(const char *name, size_t namelen) { struct git_attr *a; if (!attr_name_valid(name, namelen)) return NULL; hashmap_lock(&g_attr_hashmap); a = attr_hashmap_get(&g_attr_hashmap, name, namelen); if (!a) { FLEX_ALLOC_MEM(a, name, name, namelen); a->attr_nr = hashmap_get_size(&g_attr_hashmap.map); attr_hashmap_add(&g_attr_hashmap, a->name, namelen, a); if (a->attr_nr != hashmap_get_size(&g_attr_hashmap.map) - 1) die(_("unable to add additional attribute")); } hashmap_unlock(&g_attr_hashmap); return a; } const struct git_attr *git_attr(const char *name) { return git_attr_internal(name, strlen(name)); } static const char blank[] = " \t\r\n"; /* Flags usable in read_attr() and parse_attr_line() family of functions. */ #define READ_ATTR_MACRO_OK (1<<0) #define READ_ATTR_NOFOLLOW (1<<1) /* * Parse a whitespace-delimited attribute state (i.e., "attr", * "-attr", "!attr", or "attr=value") from the string starting at src. * If e is not NULL, write the results to *e. Return a pointer to the * remainder of the string (with leading whitespace removed), or NULL * if there was an error. 
*/ static const char *parse_attr(const char *src, int lineno, const char *cp, struct attr_state *e) { const char *ep, *equals; size_t len; ep = cp + strcspn(cp, blank); equals = strchr(cp, '='); if (equals && ep < equals) equals = NULL; if (equals) len = equals - cp; else len = ep - cp; if (!e) { if (*cp == '-' || *cp == '!') { cp++; len--; } if (!attr_name_valid(cp, len) || attr_name_reserved(cp)) { report_invalid_attr(cp, len, src, lineno); return NULL; } } else { /* * As this function is always called twice, once with * e == NULL in the first pass and then e != NULL in * the second pass, no need for attr_name_valid() * check here. */ if (*cp == '-' || *cp == '!') { e->setto = (*cp == '-') ? ATTR__FALSE : ATTR__UNSET; cp++; len--; } else if (!equals) e->setto = ATTR__TRUE; else { e->setto = xmemdupz(equals + 1, ep - equals - 1); } e->attr = git_attr_internal(cp, len); } return ep + strspn(ep, blank); } struct match_attr *parse_attr_line(const char *line, const char *src, int lineno, unsigned flags) { size_t namelen, num_attr, i; const char *cp, *name, *states; struct match_attr *res = NULL; int is_macro; struct strbuf pattern = STRBUF_INIT; cp = line + strspn(line, blank); if (!*cp || *cp == '#') return NULL; name = cp; if (strlen(line) >= ATTR_MAX_LINE_LENGTH) { warning(_("ignoring overly long attributes line %d"), lineno); return NULL; } if (*cp == '"' && !unquote_c_style(&pattern, name, &states)) { name = pattern.buf; namelen = pattern.len; } else { namelen = strcspn(name, blank); states = name + namelen; } if (strlen(ATTRIBUTE_MACRO_PREFIX) < namelen && starts_with(name, ATTRIBUTE_MACRO_PREFIX)) { if (!(flags & READ_ATTR_MACRO_OK)) { fprintf_ln(stderr, _("%s not allowed: %s:%d"), name, src, lineno); goto fail_return; } is_macro = 1; name += strlen(ATTRIBUTE_MACRO_PREFIX); name += strspn(name, blank); namelen = strcspn(name, blank); if (!attr_name_valid(name, namelen) || attr_name_reserved(name)) { report_invalid_attr(name, namelen, src, lineno); goto 
fail_return; } } else is_macro = 0; states += strspn(states, blank); /* First pass to count the attr_states */ for (cp = states, num_attr = 0; *cp; num_attr++) { cp = parse_attr(src, lineno, cp, NULL); if (!cp) goto fail_return; } res = xcalloc(1, st_add3(sizeof(*res), st_mult(sizeof(struct attr_state), num_attr), is_macro ? 0 : namelen + 1)); if (is_macro) { res->u.attr = git_attr_internal(name, namelen); } else { char *p = (char *)&(res->state[num_attr]); memcpy(p, name, namelen); res->u.pat.pattern = p; parse_path_pattern(&res->u.pat.pattern, &res->u.pat.patternlen, &res->u.pat.flags, &res->u.pat.nowildcardlen); if (res->u.pat.flags & PATTERN_FLAG_NEGATIVE) { warning(_("Negative patterns are ignored in git attributes\n" "Use '\\!' for literal leading exclamation.")); goto fail_return; } } res->is_macro = is_macro; res->num_attr = num_attr; /* Second pass to fill the attr_states */ for (cp = states, i = 0; *cp; i++) { cp = parse_attr(src, lineno, cp, &(res->state[i])); } strbuf_release(&pattern); return res; fail_return: strbuf_release(&pattern); free(res); return NULL; } /* * Like info/exclude and .gitignore, the attribute information can * come from many places. * * (1) .gitattributes file of the same directory; * (2) .gitattributes file of the parent directory if (1) does not have * any match; this goes recursively upwards, just like .gitignore. * (3) $GIT_DIR/info/attributes, which overrides both of the above. * * In the same file, later entries override the earlier match, so in the * global list, we would have entries from info/attributes the earliest * (reading the file from top to bottom), .gitattributes of the root * directory (again, reading the file from top to bottom) down to the * current directory, and then scan the list backwards to find the first match. * This is exactly the same as what is_excluded() does in dir.c to deal with * .gitignore file and info/excludes file as a fallback. 
*/ struct attr_stack { struct attr_stack *prev; char *origin; size_t originlen; unsigned num_matches; unsigned alloc; struct match_attr **attrs; }; static void attr_stack_free(struct attr_stack *e) { unsigned i; free(e->origin); for (i = 0; i < e->num_matches; i++) { struct match_attr *a = e->attrs[i]; size_t j; for (j = 0; j < a->num_attr; j++) { const char *setto = a->state[j].setto; if (setto == ATTR__TRUE || setto == ATTR__FALSE || setto == ATTR__UNSET || setto == ATTR__UNKNOWN) ; else free((char *) setto); } free(a); } free(e->attrs); free(e); } static void drop_attr_stack(struct attr_stack **stack) { while (*stack) { struct attr_stack *elem = *stack; *stack = elem->prev; attr_stack_free(elem); } } /* List of all attr_check structs; access should be surrounded by mutex */ static struct check_vector { size_t nr; size_t alloc; struct attr_check **checks; pthread_mutex_t mutex; } check_vector; static inline void vector_lock(void) { pthread_mutex_lock(&check_vector.mutex); } static inline void vector_unlock(void) { pthread_mutex_unlock(&check_vector.mutex); } static void check_vector_add(struct attr_check *c) { vector_lock(); ALLOC_GROW(check_vector.checks, check_vector.nr + 1, check_vector.alloc); check_vector.checks[check_vector.nr++] = c; vector_unlock(); } static void check_vector_remove(struct attr_check *check) { int i; vector_lock(); /* Find entry */ for (i = 0; i < check_vector.nr; i++) if (check_vector.checks[i] == check) break; if (i >= check_vector.nr) BUG("no entry found"); /* shift entries over */ for (; i < check_vector.nr - 1; i++) check_vector.checks[i] = check_vector.checks[i + 1]; check_vector.nr--; vector_unlock(); } /* Iterate through all attr_check instances and drop their stacks */ static void drop_all_attr_stacks(void) { int i; vector_lock(); for (i = 0; i < check_vector.nr; i++) { drop_attr_stack(&check_vector.checks[i]->stack); } vector_unlock(); } struct attr_check *attr_check_alloc(void) { struct attr_check *c = xcalloc(1, sizeof(struct 
attr_check)); /* save pointer to the check struct */ check_vector_add(c); return c; } struct attr_check *attr_check_initl(const char *one, ...) { struct attr_check *check; int cnt; va_list params; const char *param; va_start(params, one); for (cnt = 1; (param = va_arg(params, const char *)) != NULL; cnt++) ; va_end(params); check = attr_check_alloc(); check->nr = cnt; check->alloc = cnt; CALLOC_ARRAY(check->items, cnt); check->items[0].attr = git_attr(one); va_start(params, one); for (cnt = 1; cnt < check->nr; cnt++) { const struct git_attr *attr; param = va_arg(params, const char *); if (!param) BUG("counted %d != ended at %d", check->nr, cnt); attr = git_attr(param); if (!attr) BUG("%s: not a valid attribute name", param); check->items[cnt].attr = attr; } va_end(params); return check; } struct attr_check *attr_check_dup(const struct attr_check *check) { struct attr_check *ret; if (!check) return NULL; ret = attr_check_alloc(); ret->nr = check->nr; ret->alloc = check->alloc; DUP_ARRAY(ret->items, check->items, ret->nr); return ret; } struct attr_check_item *attr_check_append(struct attr_check *check, const struct git_attr *attr) { struct attr_check_item *item; ALLOC_GROW(check->items, check->nr + 1, check->alloc); item = &check->items[check->nr++]; item->attr = attr; return item; } void attr_check_reset(struct attr_check *check) { check->nr = 0; } void attr_check_clear(struct attr_check *check) { FREE_AND_NULL(check->items); check->alloc = 0; check->nr = 0; FREE_AND_NULL(check->all_attrs); check->all_attrs_nr = 0; drop_attr_stack(&check->stack); } void attr_check_free(struct attr_check *check) { if (check) { /* Remove check from the check vector */ check_vector_remove(check); attr_check_clear(check); free(check); } } static const char *builtin_attr[] = { "[attr]binary -diff -merge -text", NULL, }; static void handle_attr_line(struct attr_stack *res, const char *line, const char *src, int lineno, unsigned flags) { struct match_attr *a; a = parse_attr_line(line, 
src, lineno, flags); if (!a) return; ALLOC_GROW_BY(res->attrs, res->num_matches, 1, res->alloc); res->attrs[res->num_matches - 1] = a; } static struct attr_stack *read_attr_from_array(const char **list) { struct attr_stack *res; const char *line; int lineno = 0; CALLOC_ARRAY(res, 1); while ((line = *(list++)) != NULL) handle_attr_line(res, line, "[builtin]", ++lineno, READ_ATTR_MACRO_OK); return res; } /* * Callers into the attribute system assume there is a single, system-wide * global state where attributes are read from and when the state is flipped by * calling git_attr_set_direction(), the stack frames that have been * constructed need to be discarded so that subsequent calls into the * attribute system will lazily read from the right place. Since changing * direction causes a global paradigm shift, it should not ever be called while * another thread could potentially be calling into the attribute system. */ static enum git_attr_direction direction; void git_attr_set_direction(enum git_attr_direction new_direction) { if (is_bare_repository() && new_direction != GIT_ATTR_INDEX) BUG("non-INDEX attr direction in a bare repo"); if (new_direction != direction) drop_all_attr_stacks(); direction = new_direction; } static struct attr_stack *read_attr_from_file(const char *path, unsigned flags) { struct strbuf buf = STRBUF_INIT; int fd; FILE *fp; struct attr_stack *res; int lineno = 0; struct stat st; if (flags & READ_ATTR_NOFOLLOW) fd = open_nofollow(path, O_RDONLY); else fd = open(path, O_RDONLY); if (fd < 0) { warn_on_fopen_errors(path); return NULL; } fp = xfdopen(fd, "r"); if (fstat(fd, &st)) { warning_errno(_("cannot fstat gitattributes file '%s'"), path); fclose(fp); return NULL; } if (st.st_size >= ATTR_MAX_FILE_SIZE) { warning(_("ignoring overly large gitattributes file '%s'"), path); fclose(fp); return NULL; } CALLOC_ARRAY(res, 1); while (strbuf_getline(&buf, fp) != EOF) { if (!lineno && starts_with(buf.buf, utf8_bom)) strbuf_remove(&buf, 0, 
strlen(utf8_bom)); handle_attr_line(res, buf.buf, path, ++lineno, flags); } fclose(fp); strbuf_release(&buf); return res; } static struct attr_stack *read_attr_from_buf(char *buf, size_t length, const char *path, unsigned flags) { struct attr_stack *res; char *sp; int lineno = 0; if (!buf) return NULL; if (length >= ATTR_MAX_FILE_SIZE) { warning(_("ignoring overly large gitattributes blob '%s'"), path); free(buf); return NULL; } CALLOC_ARRAY(res, 1); for (sp = buf; *sp;) { char *ep; int more; ep = strchrnul(sp, '\n'); more = (*ep == '\n'); *ep = '\0'; handle_attr_line(res, sp, path, ++lineno, flags); sp = ep + more; } free(buf); return res; } static struct attr_stack *read_attr_from_blob(struct index_state *istate, const struct object_id *tree_oid, const char *path, unsigned flags) { struct object_id oid; unsigned long sz; enum object_type type; void *buf; unsigned short mode; if (!tree_oid) return NULL; if (get_tree_entry(istate->repo, tree_oid, path, &oid, &mode)) return NULL; buf = repo_read_object_file(istate->repo, &oid, &type, &sz); if (!buf || type != OBJ_BLOB) { free(buf); return NULL; } return read_attr_from_buf(buf, sz, path, flags); } static struct attr_stack *read_attr_from_index(struct index_state *istate, const char *path, unsigned flags) { struct attr_stack *stack = NULL; char *buf; unsigned long size; int sparse_dir_pos = -1; if (!istate) return NULL; /* * When handling sparse-checkouts, .gitattributes files * may reside within a sparse directory. We distinguish * whether a path exists directly in the index or not by * evaluating if 'pos' is negative. * If 'pos' is negative, the path is not directly present * in the index and is likely within a sparse directory. * For paths not in the index, The absolute value of 'pos' * minus 1 gives us the position where the path would be * inserted in lexicographic order within the index. 
* We then subtract another 1 from this value * (sparse_dir_pos = -pos - 2) to find the position of the * last index entry which is lexicographically smaller than * the path. This would be the sparse directory containing * the path. By identifying the sparse directory containing * the path, we can correctly read the attributes specified * in the .gitattributes file from the tree object of the * sparse directory. */ if (!path_in_cone_mode_sparse_checkout(path, istate)) { int pos = index_name_pos_sparse(istate, path, strlen(path)); if (pos < 0) sparse_dir_pos = -pos - 2; } if (sparse_dir_pos >= 0 && S_ISSPARSEDIR(istate->cache[sparse_dir_pos]->ce_mode) && !strncmp(istate->cache[sparse_dir_pos]->name, path, ce_namelen(istate->cache[sparse_dir_pos]))) { const char *relative_path = path + ce_namelen(istate->cache[sparse_dir_pos]); stack = read_attr_from_blob(istate, &istate->cache[sparse_dir_pos]->oid, relative_path, flags); } else { buf = read_blob_data_from_index(istate, path, &size); if (buf) stack = read_attr_from_buf(buf, size, path, flags); } return stack; } static struct attr_stack *read_attr(struct index_state *istate, const struct object_id *tree_oid, const char *path, unsigned flags) { struct attr_stack *res = NULL; if (direction == GIT_ATTR_INDEX) { res = read_attr_from_index(istate, path, flags); } else if (tree_oid) { res = read_attr_from_blob(istate, tree_oid, path, flags); } else if (!is_bare_repository()) { if (direction == GIT_ATTR_CHECKOUT) { res = read_attr_from_index(istate, path, flags); if (!res) res = read_attr_from_file(path, flags); } else if (direction == GIT_ATTR_CHECKIN) { res = read_attr_from_file(path, flags); if (!res) /* * There is no checked out .gitattributes file * there, but we might have it in the index. * We allow operation in a sparsely checked out * work tree, so read from it. 
*/ res = read_attr_from_index(istate, path, flags); } } if (!res) CALLOC_ARRAY(res, 1); return res; } const char *git_attr_system_file(void) { static const char *system_wide; if (!system_wide) system_wide = system_path(ETC_GITATTRIBUTES); return system_wide; } const char *git_attr_global_file(void) { if (!git_attributes_file) git_attributes_file = xdg_config_home("attributes"); return git_attributes_file; } int git_attr_system_is_enabled(void) { return !git_env_bool("GIT_ATTR_NOSYSTEM", 0); } static GIT_PATH_FUNC(git_path_info_attributes, INFOATTRIBUTES_FILE) static void push_stack(struct attr_stack **attr_stack_p, struct attr_stack *elem, char *origin, size_t originlen) { if (elem) { elem->origin = origin; if (origin) elem->originlen = originlen; elem->prev = *attr_stack_p; *attr_stack_p = elem; } } static void bootstrap_attr_stack(struct index_state *istate, const struct object_id *tree_oid, struct attr_stack **stack) { struct attr_stack *e; unsigned flags = READ_ATTR_MACRO_OK; if (*stack) return; /* builtin frame */ e = read_attr_from_array(builtin_attr); push_stack(stack, e, NULL, 0); /* system-wide frame */ if (git_attr_system_is_enabled()) { e = read_attr_from_file(git_attr_system_file(), flags); push_stack(stack, e, NULL, 0); } /* home directory */ if (git_attr_global_file()) { e = read_attr_from_file(git_attr_global_file(), flags); push_stack(stack, e, NULL, 0); } /* root directory */ e = read_attr(istate, tree_oid, GITATTRIBUTES_FILE, flags | READ_ATTR_NOFOLLOW); push_stack(stack, e, xstrdup(""), 0); /* info frame */ if (startup_info->have_repository) e = read_attr_from_file(git_path_info_attributes(), flags); else e = NULL; if (!e) CALLOC_ARRAY(e, 1); push_stack(stack, e, NULL, 0); } static void prepare_attr_stack(struct index_state *istate, const struct object_id *tree_oid, const char *path, int dirlen, struct attr_stack **stack) { struct attr_stack *info; struct strbuf pathbuf = STRBUF_INIT; /* * At the bottom of the attribute stack is the built-in * 
set of attribute definitions, followed by the contents * of $(prefix)/etc/gitattributes and a file specified by * core.attributesfile. Then, contents from * .gitattributes files from directories closer to the * root to the ones in deeper directories are pushed * to the stack. Finally, at the very top of the stack * we always keep the contents of $GIT_DIR/info/attributes. * * When checking, we use entries from near the top of the * stack, preferring $GIT_DIR/info/attributes, then * .gitattributes in deeper directories to shallower ones, * and finally use the built-in set as the default. */ bootstrap_attr_stack(istate, tree_oid, stack); /* * Pop the "info" one that is always at the top of the stack. */ info = *stack; *stack = info->prev; /* * Pop the ones from directories that are not the prefix of * the path we are checking. Break out of the loop when we see * the root one (whose origin is an empty string "") or the builtin * one (whose origin is NULL) without popping it. */ while ((*stack)->origin) { int namelen = (*stack)->originlen; struct attr_stack *elem; elem = *stack; if (namelen <= dirlen && !strncmp(elem->origin, path, namelen) && (!namelen || path[namelen] == '/')) break; *stack = elem->prev; attr_stack_free(elem); } /* * bootstrap_attr_stack() should have added, and the * above loop should have stopped before popping, the * root element whose attr_stack->origin is set to an * empty string. 
*/ assert((*stack)->origin); strbuf_addstr(&pathbuf, (*stack)->origin); /* Build up to the directory 'path' is in */ while (pathbuf.len < dirlen) { size_t len = pathbuf.len; struct attr_stack *next; char *origin; /* Skip path-separator */ if (len < dirlen && is_dir_sep(path[len])) len++; /* Find the end of the next component */ while (len < dirlen && !is_dir_sep(path[len])) len++; if (pathbuf.len > 0) strbuf_addch(&pathbuf, '/'); strbuf_add(&pathbuf, path + pathbuf.len, (len - pathbuf.len)); strbuf_addf(&pathbuf, "/%s", GITATTRIBUTES_FILE); next = read_attr(istate, tree_oid, pathbuf.buf, READ_ATTR_NOFOLLOW); /* reset the pathbuf to not include "/.gitattributes" */ strbuf_setlen(&pathbuf, len); origin = xstrdup(pathbuf.buf); push_stack(stack, next, origin, len); } /* * Finally push the "info" one at the top of the stack. */ push_stack(stack, info, NULL, 0); strbuf_release(&pathbuf); } static int path_matches(const char *pathname, int pathlen, int basename_offset, const struct pattern *pat, const char *base, int baselen) { const char *pattern = pat->pattern; int prefix = pat->nowildcardlen; int isdir = (pathlen && pathname[pathlen - 1] == '/'); if ((pat->flags & PATTERN_FLAG_MUSTBEDIR) && !isdir) return 0; if (pat->flags & PATTERN_FLAG_NODIR) { return match_basename(pathname + basename_offset, pathlen - basename_offset - isdir, pattern, prefix, pat->patternlen, pat->flags); } return match_pathname(pathname, pathlen - isdir, base, baselen, pattern, prefix, pat->patternlen); } static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem); static int fill_one(struct all_attrs_item *all_attrs, const struct match_attr *a, int rem) { size_t i; for (i = a->num_attr; rem > 0 && i > 0; i--) { const struct git_attr *attr = a->state[i - 1].attr; const char **n = &(all_attrs[attr->attr_nr].value); const char *v = a->state[i - 1].setto; if (*n == ATTR__UNKNOWN) { *n = v; rem--; rem = macroexpand_one(all_attrs, attr->attr_nr, rem); } } return rem; } static int 
fill(const char *path, int pathlen, int basename_offset, const struct attr_stack *stack, struct all_attrs_item *all_attrs, int rem) { for (; rem > 0 && stack; stack = stack->prev) { unsigned i; const char *base = stack->origin ? stack->origin : ""; for (i = stack->num_matches; 0 < rem && 0 < i; i--) { const struct match_attr *a = stack->attrs[i - 1]; if (a->is_macro) continue; if (path_matches(path, pathlen, basename_offset, &a->u.pat, base, stack->originlen)) rem = fill_one(all_attrs, a, rem); } } return rem; } static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem) { const struct all_attrs_item *item = &all_attrs[nr]; if (item->macro && item->value == ATTR__TRUE) return fill_one(all_attrs, item->macro, rem); else return rem; } /* * Marks the attributes which are macros based on the attribute stack. * This prevents having to search through the attribute stack each time * a macro needs to be expanded during the fill stage. */ static void determine_macros(struct all_attrs_item *all_attrs, const struct attr_stack *stack) { for (; stack; stack = stack->prev) { unsigned i; for (i = stack->num_matches; i > 0; i--) { const struct match_attr *ma = stack->attrs[i - 1]; if (ma->is_macro) { unsigned int n = ma->u.attr->attr_nr; if (!all_attrs[n].macro) { all_attrs[n].macro = ma; } } } } } /* * Collect attributes for path into the array pointed to by check->all_attrs. * If check->check_nr is non-zero, only attributes in check[] are collected. * Otherwise all attributes are collected. 
*/ static void collect_some_attrs(struct index_state *istate, const struct object_id *tree_oid, const char *path, struct attr_check *check) { int pathlen, rem, dirlen; const char *cp, *last_slash = NULL; int basename_offset; for (cp = path; *cp; cp++) { if (*cp == '/' && cp[1]) last_slash = cp; } pathlen = cp - path; if (last_slash) { basename_offset = last_slash + 1 - path; dirlen = last_slash - path; } else { basename_offset = 0; dirlen = 0; } prepare_attr_stack(istate, tree_oid, path, dirlen, &check->stack); all_attrs_init(&g_attr_hashmap, check); determine_macros(check->all_attrs, check->stack); rem = check->all_attrs_nr; fill(path, pathlen, basename_offset, check->stack, check->all_attrs, rem); } static const char *default_attr_source_tree_object_name; void set_git_attr_source(const char *tree_object_name) { default_attr_source_tree_object_name = xstrdup(tree_object_name); } static int compute_default_attr_source(struct object_id *attr_source) { int ignore_bad_attr_tree = 0; if (!default_attr_source_tree_object_name) default_attr_source_tree_object_name = getenv(GIT_ATTR_SOURCE_ENVIRONMENT); if (!default_attr_source_tree_object_name && git_attr_tree) { default_attr_source_tree_object_name = git_attr_tree; ignore_bad_attr_tree = 1; } if (!default_attr_source_tree_object_name) return 0; if (!startup_info->have_repository) { if (!ignore_bad_attr_tree) die(_("cannot use --attr-source or GIT_ATTR_SOURCE without repo")); return 0; } if (repo_get_oid_treeish(the_repository, default_attr_source_tree_object_name, attr_source)) { if (!ignore_bad_attr_tree) die(_("bad --attr-source or GIT_ATTR_SOURCE")); return 0; } return 1; } static struct object_id *default_attr_source(void) { static struct object_id attr_source; static int has_attr_source = -1; if (has_attr_source < 0) has_attr_source = compute_default_attr_source(&attr_source); if (!has_attr_source) return NULL; return &attr_source; } static const char *interned_mode_string(unsigned int mode) { static struct { 
unsigned int val; char str[7]; } mode_string[] = { { .val = 0040000 }, { .val = 0100644 }, { .val = 0100755 }, { .val = 0120000 }, { .val = 0160000 }, }; int i; for (i = 0; i < ARRAY_SIZE(mode_string); i++) { if (mode_string[i].val != mode) continue; if (!*mode_string[i].str) snprintf(mode_string[i].str, sizeof(mode_string[i].str), "%06o", mode); return mode_string[i].str; } BUG("Unsupported mode 0%o", mode); } static const char *builtin_object_mode_attr(struct index_state *istate, const char *path) { unsigned int mode; if (direction == GIT_ATTR_CHECKIN) { struct object_id oid; struct stat st; if (lstat(path, &st)) die_errno(_("unable to stat '%s'"), path); mode = canon_mode(st.st_mode); if (S_ISDIR(mode)) { /* *`path` is either a directory or it is a submodule, * in which case it is already indexed as submodule * or it does not exist in the index yet and we need to * check if we can resolve to a ref. */ int pos = index_name_pos(istate, path, strlen(path)); if (pos >= 0) { if (S_ISGITLINK(istate->cache[pos]->ce_mode)) mode = istate->cache[pos]->ce_mode; } else if (repo_resolve_gitlink_ref(the_repository, path, "HEAD", &oid) == 0) { mode = S_IFGITLINK; } } } else { /* * For GIT_ATTR_CHECKOUT and GIT_ATTR_INDEX we only check * for mode in the index. 
*/ int pos = index_name_pos(istate, path, strlen(path)); if (pos >= 0) mode = istate->cache[pos]->ce_mode; else return ATTR__UNSET; } return interned_mode_string(mode); } static const char *compute_builtin_attr(struct index_state *istate, const char *path, const struct git_attr *attr) { static const struct git_attr *object_mode_attr; if (!object_mode_attr) object_mode_attr = git_attr("builtin_objectmode"); if (attr == object_mode_attr) return builtin_object_mode_attr(istate, path); return ATTR__UNSET; } void git_check_attr(struct index_state *istate, const char *path, struct attr_check *check) { int i; const struct object_id *tree_oid = default_attr_source(); collect_some_attrs(istate, tree_oid, path, check); for (i = 0; i < check->nr; i++) { unsigned int n = check->items[i].attr->attr_nr; const char *value = check->all_attrs[n].value; if (value == ATTR__UNKNOWN) value = compute_builtin_attr(istate, path, check->all_attrs[n].attr); check->items[i].value = value; } } void git_all_attrs(struct index_state *istate, const char *path, struct attr_check *check) { int i; const struct object_id *tree_oid = default_attr_source(); attr_check_reset(check); collect_some_attrs(istate, tree_oid, path, check); for (i = 0; i < check->all_attrs_nr; i++) { const char *name = check->all_attrs[i].attr->name; const char *value = check->all_attrs[i].value; struct attr_check_item *item; if (value == ATTR__UNSET || value == ATTR__UNKNOWN) continue; item = attr_check_append(check, git_attr(name)); item->value = value; } } void attr_start(void) { pthread_mutex_init(&g_attr_hashmap.mutex, NULL); pthread_mutex_init(&check_vector.mutex, NULL); } git-cinnabar-0.7.0/git-core/attr.h000064400000000000000000000201361046102023000150240ustar 00000000000000#ifndef ATTR_H #define ATTR_H /** * gitattributes mechanism gives a uniform way to associate various attributes * to set of paths. 
* * * Querying Specific Attributes * ---------------------------- * * - Prepare `struct attr_check` using attr_check_initl() function, enumerating * the names of attributes whose values you are interested in, terminated with * a NULL pointer. Alternatively, an empty `struct attr_check` can be * prepared by calling `attr_check_alloc()` function and then attributes you * want to ask about can be added to it with `attr_check_append()` function. * * - Call `git_check_attr()` to check the attributes for the path. * * - Inspect `attr_check` structure to see how each of the attribute in the * array is defined for the path. * * * Example * ------- * * To see how attributes "crlf" and "ident" are set for different paths. * * - Prepare a `struct attr_check` with two elements (because we are checking * two attributes): * * ------------ * static struct attr_check *check; * static void setup_check(void) * { * if (check) * return; // already done * check = attr_check_initl("crlf", "ident", NULL); * } * ------------ * * - Call `git_check_attr()` with the prepared `struct attr_check`: * * ------------ * const char *path; * * setup_check(); * git_check_attr(&the_index, path, check); * ------------ * * - Act on `.value` member of the result, left in `check->items[]`: * * ------------ * const char *value = check->items[0].value; * * if (ATTR_TRUE(value)) { * The attribute is Set, by listing only the name of the * attribute in the gitattributes file for the path. * } else if (ATTR_FALSE(value)) { * The attribute is Unset, by listing the name of the * attribute prefixed with a dash - for the path. * } else if (ATTR_UNSET(value)) { * The attribute is neither set nor unset for the path. * } else if (!strcmp(value, "input")) { * If none of ATTR_TRUE(), ATTR_FALSE(), or ATTR_UNSET() is * true, the value is a string set in the gitattributes * file for the path by saying "attr=value". * } else if (... other check using value as string ...) { * ... 
* } * ------------ * * To see how attributes in argv[] are set for different paths, only * the first step in the above would be different. * * ------------ * static struct attr_check *check; * static void setup_check(const char **argv) * { * check = attr_check_alloc(); * while (*argv) { * struct git_attr *attr = git_attr(*argv); * attr_check_append(check, attr); * argv++; * } * } * ------------ * * * Querying All Attributes * ----------------------- * * To get the values of all attributes associated with a file: * * - Prepare an empty `attr_check` structure by calling `attr_check_alloc()`. * * - Call `git_all_attrs()`, which populates the `attr_check` with the * attributes attached to the path. * * - Iterate over the `attr_check.items[]` array to examine the attribute * names and values. The name of the attribute described by an * `attr_check.items[]` object can be retrieved via * `git_attr_name(check->items[i].attr)`. (Please note that no items will be * returned for unset attributes, so `ATTR_UNSET()` will return false for all * returned `attr_check.items[]` objects.) * * - Free the `attr_check` struct by calling `attr_check_free()`. */ /** * The maximum line length for a gitattributes file. If the line exceeds this * length we will ignore it. */ #define ATTR_MAX_LINE_LENGTH 2048 /** * The maximum size of the giattributes file. If the file exceeds this size we * will ignore it. */ #define ATTR_MAX_FILE_SIZE (100 * 1024 * 1024) struct index_state; /** * An attribute is an opaque object that is identified by its name. Pass the * name to `git_attr()` function to obtain the object of this type. * The internal representation of this structure is of no interest to the * calling programs. The name of the attribute can be retrieved by calling * `git_attr_name()`. 
*/ struct git_attr; /* opaque structures used internally for attribute collection */ struct all_attrs_item; struct attr_stack; /* * The textual object name for the tree-ish used by git_check_attr() * to read attributes from (instead of from the working tree). */ void set_git_attr_source(const char *); /* * Given a string, return the gitattribute object that * corresponds to it. */ const struct git_attr *git_attr(const char *); /* Internal use */ extern const char git_attr__true[]; extern const char git_attr__false[]; /** * Attribute Values * ---------------- * * An attribute for a path can be in one of four states: Set, Unset, Unspecified * or set to a string, and `.value` member of `struct attr_check_item` records * it. The three macros check these, if none of them returns true, `.value` * member points at a string value of the attribute for the path. */ /* Returns true if the attribute is Set for the path. */ #define ATTR_TRUE(v) ((v) == git_attr__true) /* Returns true if the attribute is Unset for the path. */ #define ATTR_FALSE(v) ((v) == git_attr__false) /* Returns true if the attribute is Unspecified for the path. */ #define ATTR_UNSET(v) ((v) == NULL) /* This structure represents one attribute and its value. */ struct attr_check_item { const struct git_attr *attr; const char *value; }; /** * This structure represents a collection of `attr_check_item`. It is passed to * `git_check_attr()` function, specifying the attributes to check, and * receives their values. 
*/ struct attr_check { int nr; int alloc; struct attr_check_item *items; int all_attrs_nr; struct all_attrs_item *all_attrs; struct attr_stack *stack; }; struct attr_check *attr_check_alloc(void); LAST_ARG_MUST_BE_NULL struct attr_check *attr_check_initl(const char *, ...); struct attr_check *attr_check_dup(const struct attr_check *check); struct attr_check_item *attr_check_append(struct attr_check *check, const struct git_attr *attr); void attr_check_reset(struct attr_check *check); void attr_check_clear(struct attr_check *check); void attr_check_free(struct attr_check *check); /* * Return the name of the attribute represented by the argument. The * return value is a pointer to a null-delimited string that is part * of the internal data structure; it should not be modified or freed. */ const char *git_attr_name(const struct git_attr *); void git_check_attr(struct index_state *istate, const char *path, struct attr_check *check); /* * Retrieve all attributes that apply to the specified path. * check holds the attributes and their values. */ void git_all_attrs(struct index_state *istate, const char *path, struct attr_check *check); enum git_attr_direction { GIT_ATTR_CHECKIN, GIT_ATTR_CHECKOUT, GIT_ATTR_INDEX }; void git_attr_set_direction(enum git_attr_direction new_direction); void attr_start(void); /* Return the system gitattributes file. */ const char *git_attr_system_file(void); /* Return the global gitattributes file, if any. */ const char *git_attr_global_file(void); /* Return whether the system gitattributes file is enabled and should be used. */ int git_attr_system_is_enabled(void); extern char *git_attr_tree; /* * Exposed for fuzz-testing only. */ /* What does a matched pattern decide? */ struct attr_state { const struct git_attr *attr; const char *setto; }; struct pattern { const char *pattern; int patternlen; int nowildcardlen; unsigned flags; /* PATTERN_FLAG_* */ }; /* * One rule, as from a .gitattributes file. 
* * If is_macro is true, then u.attr is a pointer to the git_attr being * defined. * * If is_macro is false, then u.pat is the filename pattern to which the * rule applies. * * In either case, num_attr is the number of attributes affected by * this rule, and state is an array listing them. The attributes are * listed as they appear in the file (macros unexpanded). */ struct match_attr { union { struct pattern pat; const struct git_attr *attr; } u; char is_macro; size_t num_attr; struct attr_state state[FLEX_ARRAY]; }; struct match_attr *parse_attr_line(const char *line, const char *src, int lineno, unsigned flags); #endif /* ATTR_H */ git-cinnabar-0.7.0/git-core/banned.h000064400000000000000000000020731046102023000153010ustar 00000000000000#ifndef BANNED_H #define BANNED_H /* * This header lists functions that have been banned from our code base, * because they're too easy to misuse (and even if used correctly, * complicate audits). Including this header turns them into compile-time * errors. */ #define BANNED(func) sorry_##func##_is_a_banned_function #undef strcpy #define strcpy(x,y) BANNED(strcpy) #undef strcat #define strcat(x,y) BANNED(strcat) #undef strncpy #define strncpy(x,y,n) BANNED(strncpy) #undef strncat #define strncat(x,y,n) BANNED(strncat) #undef strtok #define strtok(x,y) BANNED(strtok) #undef strtok_r #define strtok_r(x,y,z) BANNED(strtok_r) #undef sprintf #undef vsprintf #define sprintf(...) BANNED(sprintf) #define vsprintf(...) 
BANNED(vsprintf) #undef gmtime #define gmtime(t) BANNED(gmtime) #undef localtime #define localtime(t) BANNED(localtime) #undef ctime #define ctime(t) BANNED(ctime) #undef ctime_r #define ctime_r(t, buf) BANNED(ctime_r) #undef asctime #define asctime(t) BANNED(asctime) #undef asctime_r #define asctime_r(t, buf) BANNED(asctime_r) #endif /* BANNED_H */ git-cinnabar-0.7.0/git-core/base85.c000064400000000000000000000054461046102023000151430ustar 00000000000000#include "git-compat-util.h" #include "base85.h" #undef DEBUG_85 #ifdef DEBUG_85 #define say(a) fprintf(stderr, a) #define say1(a,b) fprintf(stderr, a, b) #define say2(a,b,c) fprintf(stderr, a, b, c) #else #define say(a) do { /* nothing */ } while (0) #define say1(a,b) do { /* nothing */ } while (0) #define say2(a,b,c) do { /* nothing */ } while (0) #endif static const char en85[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=', '>', '?', '@', '^', '_', '`', '{', '|', '}', '~' }; static char de85[256]; static void prep_base85(void) { if (de85['Z']) return; for (size_t i = 0; i < ARRAY_SIZE(en85); i++) { int ch = en85[i]; de85[ch] = i + 1; } } int decode_85(char *dst, const char *buffer, int len) { prep_base85(); say2("decode 85 <%.*s>", len / 4 * 5, buffer); while (len) { unsigned acc = 0; int de, cnt = 4; unsigned char ch; do { ch = *buffer++; de = de85[ch]; if (--de < 0) return error("invalid base85 alphabet %c", ch); acc = acc * 85 + de; } while (--cnt); ch = *buffer++; de = de85[ch]; if (--de < 0) return error("invalid base85 alphabet %c", ch); /* Detect overflow. 
*/ if (0xffffffff / 85 < acc || 0xffffffff - de < (acc *= 85)) return error("invalid base85 sequence %.5s", buffer-5); acc += de; say1(" %08x", acc); cnt = (len < 4) ? len : 4; len -= cnt; do { acc = (acc << 8) | (acc >> 24); *dst++ = acc; } while (--cnt); } say("\n"); return 0; } void encode_85(char *buf, const unsigned char *data, int bytes) { say("encode 85"); while (bytes) { unsigned acc = 0; int cnt; for (cnt = 24; cnt >= 0; cnt -= 8) { unsigned ch = *data++; acc |= ch << cnt; if (--bytes == 0) break; } say1(" %08x", acc); for (cnt = 4; cnt >= 0; cnt--) { int val = acc % 85; acc /= 85; buf[cnt] = en85[val]; } buf += 5; } say("\n"); *buf = 0; } #ifdef DEBUG_85 int main(int ac, char **av) { char buf[1024]; if (!strcmp(av[1], "-e")) { int len = strlen(av[2]); encode_85(buf, av[2], len); if (len <= 26) len = len + 'A' - 1; else len = len + 'a' - 26 - 1; printf("encoded: %c%s\n", len, buf); return 0; } if (!strcmp(av[1], "-d")) { int len = *av[2]; if ('A' <= len && len <= 'Z') len = len - 'A' + 1; else len = len - 'a' + 26 + 1; decode_85(buf, av[2]+1, len); printf("decoded: %.*s\n", len, buf); return 0; } if (!strcmp(av[1], "-t")) { char t[4] = { -1,-1,-1,-1 }; encode_85(buf, t, 4); printf("encoded: D%s\n", buf); return 0; } } #endif git-cinnabar-0.7.0/git-core/base85.h000064400000000000000000000002641046102023000151410ustar 00000000000000#ifndef BASE85_H #define BASE85_H int decode_85(char *dst, const char *line, int linelen); void encode_85(char *buf, const unsigned char *data, int bytes); #endif /* BASE85_H */ git-cinnabar-0.7.0/git-core/bisect.c000064400000000000000000000762101046102023000153220ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "config.h" #include "commit.h" #include "diff.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "revision.h" #include "refs.h" #include "list-objects.h" #include "quote.h" #include "run-command.h" #include 
"log-tree.h" #include "bisect.h" #include "oid-array.h" #include "strvec.h" #include "commit-slab.h" #include "commit-reach.h" #include "object-name.h" #include "object-store-ll.h" #include "path.h" #include "dir.h" static struct oid_array good_revs; static struct oid_array skipped_revs; static struct object_id *current_bad_oid; static char *term_bad; static char *term_good; /* Remember to update object flag allocation in object.h */ #define COUNTED (1u<<16) /* * This is a truly stupid algorithm, but it's only * used for bisection, and we just don't care enough. * * We care just barely enough to avoid recursing for * non-merge entries. */ static int count_distance(struct commit_list *entry) { int nr = 0; while (entry) { struct commit *commit = entry->item; struct commit_list *p; if (commit->object.flags & (UNINTERESTING | COUNTED)) break; if (!(commit->object.flags & TREESAME)) nr++; commit->object.flags |= COUNTED; p = commit->parents; entry = p; if (p) { p = p->next; while (p) { nr += count_distance(p); p = p->next; } } } return nr; } static void clear_distance(struct commit_list *list) { while (list) { struct commit *commit = list->item; commit->object.flags &= ~COUNTED; list = list->next; } } define_commit_slab(commit_weight, int *); static struct commit_weight commit_weight; #define DEBUG_BISECT 0 static inline int weight(struct commit_list *elem) { return **commit_weight_at(&commit_weight, elem->item); } static inline void weight_set(struct commit_list *elem, int weight) { **commit_weight_at(&commit_weight, elem->item) = weight; } static int count_interesting_parents(struct commit *commit, unsigned bisect_flags) { struct commit_list *p; int count; for (count = 0, p = commit->parents; p; p = p->next) { if (!(p->item->object.flags & UNINTERESTING)) count++; if (bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY) break; } return count; } static inline int approx_halfway(struct commit_list *p, int nr) { int diff; /* * Don't short-cut something we are not going to 
return! */ if (p->item->object.flags & TREESAME) return 0; if (DEBUG_BISECT) return 0; /* * For small number of commits 2 and 3 are halfway of 5, and * 3 is halfway of 6 but 2 and 4 are not. */ diff = 2 * weight(p) - nr; switch (diff) { case -1: case 0: case 1: return 1; default: /* * For large number of commits we are not so strict, it's * good enough if it's within ~0.1% of the halfway point, * e.g. 5000 is exactly halfway of 10000, but we consider * the values [4996, 5004] as halfway as well. */ if (abs(diff) < nr / 1024) return 1; return 0; } } static void show_list(const char *debug, int counted, int nr, struct commit_list *list) { struct commit_list *p; if (!DEBUG_BISECT) return; fprintf(stderr, "%s (%d/%d)\n", debug, counted, nr); for (p = list; p; p = p->next) { struct commit_list *pp; struct commit *commit = p->item; unsigned commit_flags = commit->object.flags; enum object_type type; unsigned long size; char *buf = repo_read_object_file(the_repository, &commit->object.oid, &type, &size); const char *subject_start; int subject_len; if (!buf) die(_("unable to read %s"), oid_to_hex(&commit->object.oid)); fprintf(stderr, "%c%c%c ", (commit_flags & TREESAME) ? ' ' : 'T', (commit_flags & UNINTERESTING) ? 'U' : ' ', (commit_flags & COUNTED) ? 
'C' : ' '); if (*commit_weight_at(&commit_weight, p->item)) fprintf(stderr, "%3d", weight(p)); else fprintf(stderr, "---"); fprintf(stderr, " %.*s", 8, oid_to_hex(&commit->object.oid)); for (pp = commit->parents; pp; pp = pp->next) fprintf(stderr, " %.*s", 8, oid_to_hex(&pp->item->object.oid)); subject_len = find_commit_subject(buf, &subject_start); if (subject_len) fprintf(stderr, " %.*s", subject_len, subject_start); fprintf(stderr, "\n"); } } static struct commit_list *best_bisection(struct commit_list *list, int nr) { struct commit_list *p, *best; int best_distance = -1; best = list; for (p = list; p; p = p->next) { int distance; unsigned commit_flags = p->item->object.flags; if (commit_flags & TREESAME) continue; distance = weight(p); if (nr - distance < distance) distance = nr - distance; if (distance > best_distance) { best = p; best_distance = distance; } } return best; } struct commit_dist { struct commit *commit; int distance; }; static int compare_commit_dist(const void *a_, const void *b_) { struct commit_dist *a, *b; a = (struct commit_dist *)a_; b = (struct commit_dist *)b_; if (a->distance != b->distance) return b->distance - a->distance; /* desc sort */ return oidcmp(&a->commit->object.oid, &b->commit->object.oid); } static struct commit_list *best_bisection_sorted(struct commit_list *list, int nr) { struct commit_list *p; struct commit_dist *array = xcalloc(nr, sizeof(*array)); struct strbuf buf = STRBUF_INIT; int cnt, i; for (p = list, cnt = 0; p; p = p->next) { int distance; unsigned commit_flags = p->item->object.flags; if (commit_flags & TREESAME) continue; distance = weight(p); if (nr - distance < distance) distance = nr - distance; array[cnt].commit = p->item; array[cnt].distance = distance; cnt++; } QSORT(array, cnt, compare_commit_dist); for (p = list, i = 0; i < cnt; i++) { struct object *obj = &(array[i].commit->object); strbuf_reset(&buf); strbuf_addf(&buf, "dist=%d", array[i].distance); add_name_decoration(DECORATION_NONE, buf.buf, 
obj); p->item = array[i].commit; if (i < cnt - 1) p = p->next; } if (p) { free_commit_list(p->next); p->next = NULL; } strbuf_release(&buf); free(array); return list; } /* * zero or positive weight is the number of interesting commits it can * reach, including itself. Especially, weight = 0 means it does not * reach any tree-changing commits (e.g. just above uninteresting one * but traversal is with pathspec). * * weight = -1 means it has one parent and its distance is yet to * be computed. * * weight = -2 means it has more than one parent and its distance is * unknown. After running count_distance() first, they will get zero * or positive distance. */ static struct commit_list *do_find_bisection(struct commit_list *list, int nr, int *weights, unsigned bisect_flags) { int n, counted; struct commit_list *p; counted = 0; for (n = 0, p = list; p; p = p->next) { struct commit *commit = p->item; unsigned commit_flags = commit->object.flags; *commit_weight_at(&commit_weight, p->item) = &weights[n++]; switch (count_interesting_parents(commit, bisect_flags)) { case 0: if (!(commit_flags & TREESAME)) { weight_set(p, 1); counted++; show_list("bisection 2 count one", counted, nr, list); } /* * otherwise, it is known not to reach any * tree-changing commit and gets weight 0. */ break; case 1: weight_set(p, -1); break; default: weight_set(p, -2); break; } } show_list("bisection 2 initialize", counted, nr, list); /* * If you have only one parent in the resulting set * then you can reach one commit more than that parent * can reach. So we do not have to run the expensive * count_distance() for single strand of pearls. * * However, if you have more than one parents, you cannot * just add their distance and one for yourself, since * they usually reach the same ancestor and you would * end up counting them twice that way. * * So we will first count distance of merges the usual * way, and then fill the blanks using cheaper algorithm. 
*/ for (p = list; p; p = p->next) { if (p->item->object.flags & UNINTERESTING) continue; if (weight(p) != -2) continue; if (bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY) BUG("shouldn't be calling count-distance in fp mode"); weight_set(p, count_distance(p)); clear_distance(list); /* Does it happen to be at half-way? */ if (!(bisect_flags & FIND_BISECTION_ALL) && approx_halfway(p, nr)) return p; counted++; } show_list("bisection 2 count_distance", counted, nr, list); while (counted < nr) { for (p = list; p; p = p->next) { struct commit_list *q; unsigned commit_flags = p->item->object.flags; if (0 <= weight(p)) continue; for (q = p->item->parents; q; q = bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY ? NULL : q->next) { if (q->item->object.flags & UNINTERESTING) continue; if (0 <= weight(q)) break; } if (!q) continue; /* * weight for p is unknown but q is known. * add one for p itself if p is to be counted, * otherwise inherit it from q directly. */ if (!(commit_flags & TREESAME)) { weight_set(p, weight(q)+1); counted++; show_list("bisection 2 count one", counted, nr, list); } else weight_set(p, weight(q)); /* Does it happen to be at half-way? */ if (!(bisect_flags & FIND_BISECTION_ALL) && approx_halfway(p, nr)) return p; } } show_list("bisection 2 counted all", counted, nr, list); if (!(bisect_flags & FIND_BISECTION_ALL)) return best_bisection(list, nr); else return best_bisection_sorted(list, nr); } void find_bisection(struct commit_list **commit_list, int *reaches, int *all, unsigned bisect_flags) { int nr, on_list; struct commit_list *list, *p, *best, *next, *last; int *weights; show_list("bisection 2 entry", 0, 0, *commit_list); init_commit_weight(&commit_weight); /* * Count the number of total and tree-changing items on the * list, while reversing the list. 
*/ for (nr = on_list = 0, last = NULL, p = *commit_list; p; p = next) { unsigned commit_flags = p->item->object.flags; next = p->next; if (commit_flags & UNINTERESTING) { free(p); continue; } p->next = last; last = p; if (!(commit_flags & TREESAME)) nr++; on_list++; } list = last; show_list("bisection 2 sorted", 0, nr, list); *all = nr; CALLOC_ARRAY(weights, on_list); /* Do the real work of finding bisection commit. */ best = do_find_bisection(list, nr, weights, bisect_flags); if (best) { if (!(bisect_flags & FIND_BISECTION_ALL)) { list->item = best->item; free_commit_list(list->next); best = list; best->next = NULL; } *reaches = weight(best); } *commit_list = best; free(weights); clear_commit_weight(&commit_weight); } static int register_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid, int flags UNUSED, void *cb_data UNUSED) { struct strbuf good_prefix = STRBUF_INIT; strbuf_addstr(&good_prefix, term_good); strbuf_addstr(&good_prefix, "-"); if (!strcmp(refname, term_bad)) { free(current_bad_oid); current_bad_oid = xmalloc(sizeof(*current_bad_oid)); oidcpy(current_bad_oid, oid); } else if (starts_with(refname, good_prefix.buf)) { oid_array_append(&good_revs, oid); } else if (starts_with(refname, "skip-")) { oid_array_append(&skipped_revs, oid); } strbuf_release(&good_prefix); return 0; } static int read_bisect_refs(void) { return refs_for_each_ref_in(get_main_ref_store(the_repository), "refs/bisect/", register_ref, NULL); } static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES") static GIT_PATH_FUNC(git_path_bisect_ancestors_ok, "BISECT_ANCESTORS_OK") static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN") static GIT_PATH_FUNC(git_path_bisect_start, "BISECT_START") static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG") static GIT_PATH_FUNC(git_path_bisect_terms, "BISECT_TERMS") static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT") static void read_bisect_paths(struct strvec *array) { struct strbuf str = 
STRBUF_INIT; const char *filename = git_path_bisect_names(); FILE *fp = xfopen(filename, "r"); while (strbuf_getline_lf(&str, fp) != EOF) { strbuf_trim(&str); if (sq_dequote_to_strvec(str.buf, array)) die(_("Badly quoted content in file '%s': %s"), filename, str.buf); } strbuf_release(&str); fclose(fp); } static char *join_oid_array_hex(struct oid_array *array, char delim) { struct strbuf joined_hexs = STRBUF_INIT; int i; for (i = 0; i < array->nr; i++) { strbuf_addstr(&joined_hexs, oid_to_hex(array->oid + i)); if (i + 1 < array->nr) strbuf_addch(&joined_hexs, delim); } return strbuf_detach(&joined_hexs, NULL); } /* * In this function, passing a not NULL skipped_first is very special. * It means that we want to know if the first commit in the list is * skipped because we will want to test a commit away from it if it is * indeed skipped. * So if the first commit is skipped, we cannot take the shortcut to * just "return list" when we find the first non skipped commit, we * have to return a fully filtered list. * * We use (*skipped_first == -1) to mean "it has been found that the * first commit is not skipped". In this case *skipped_first is set back * to 0 just before the function returns. 
*/ struct commit_list *filter_skipped(struct commit_list *list, struct commit_list **tried, int show_all, int *count, int *skipped_first) { struct commit_list *filtered = NULL, **f = &filtered; *tried = NULL; if (skipped_first) *skipped_first = 0; if (count) *count = 0; if (!skipped_revs.nr) return list; while (list) { struct commit_list *next = list->next; list->next = NULL; if (0 <= oid_array_lookup(&skipped_revs, &list->item->object.oid)) { if (skipped_first && !*skipped_first) *skipped_first = 1; /* Move current to tried list */ *tried = list; tried = &list->next; } else { if (!show_all) { if (!skipped_first || !*skipped_first) { free_commit_list(next); free_commit_list(filtered); return list; } } else if (skipped_first && !*skipped_first) { /* This means we know it's not skipped */ *skipped_first = -1; } /* Move current to filtered list */ *f = list; f = &list->next; if (count) (*count)++; } list = next; } if (skipped_first && *skipped_first == -1) *skipped_first = 0; return filtered; } #define PRN_MODULO 32768 /* * This is a pseudo random number generator based on "man 3 rand". * It is not used properly because the seed is the argument and it * is increased by one between each call, but that should not matter * for this application. */ static unsigned get_prn(unsigned count) { count = count * 1103515245 + 12345; return (count/65536) % PRN_MODULO; } /* * Custom integer square root from * https://en.wikipedia.org/wiki/Integer_square_root */ static int sqrti(int val) { float d, x = val; if (!val) return 0; do { float y = (x + (float)val / x) / 2; d = (y > x) ? 
y - x : x - y; x = y; } while (d >= 0.5); return (int)x; } static struct commit_list *skip_away(struct commit_list *list, int count) { struct commit_list *cur, *previous, *result = list; int prn, index, i; prn = get_prn(count); index = (count * prn / PRN_MODULO) * sqrti(prn) / sqrti(PRN_MODULO); cur = list; previous = NULL; for (i = 0; cur; cur = cur->next, i++) { if (i == index) { if (!oideq(&cur->item->object.oid, current_bad_oid)) result = cur; else if (previous) result = previous; else result = list; break; } previous = cur; } for (cur = list; cur != result; ) { struct commit_list *next = cur->next; free(cur); cur = next; } return result; } static struct commit_list *managed_skipped(struct commit_list *list, struct commit_list **tried) { int count, skipped_first; *tried = NULL; if (!skipped_revs.nr) return list; list = filter_skipped(list, tried, 0, &count, &skipped_first); if (!skipped_first) return list; return skip_away(list, count); } static void bisect_rev_setup(struct repository *r, struct rev_info *revs, struct strvec *rev_argv, const char *prefix, const char *bad_format, const char *good_format, int read_paths) { struct setup_revision_opt opt = { .free_removed_argv_elements = 1, }; int i; repo_init_revisions(r, revs, prefix); revs->abbrev = 0; revs->commit_format = CMIT_FMT_UNSPECIFIED; /* rev_argv.argv[0] will be ignored by setup_revisions */ strvec_push(rev_argv, "bisect_rev_setup"); strvec_pushf(rev_argv, bad_format, oid_to_hex(current_bad_oid)); for (i = 0; i < good_revs.nr; i++) strvec_pushf(rev_argv, good_format, oid_to_hex(good_revs.oid + i)); strvec_push(rev_argv, "--"); if (read_paths) read_bisect_paths(rev_argv); setup_revisions(rev_argv->nr, rev_argv->v, revs, &opt); } static void bisect_common(struct rev_info *revs) { if (prepare_revision_walk(revs)) die("revision walk setup failed"); if (revs->tree_objects) mark_edges_uninteresting(revs, NULL, 0); } static enum bisect_error error_if_skipped_commits(struct commit_list *tried, const struct 
object_id *bad) { if (!tried) return BISECT_OK; printf("There are only 'skip'ped commits left to test.\n" "The first %s commit could be any of:\n", term_bad); for ( ; tried; tried = tried->next) printf("%s\n", oid_to_hex(&tried->item->object.oid)); if (bad) printf("%s\n", oid_to_hex(bad)); printf(_("We cannot bisect more!\n")); return BISECT_ONLY_SKIPPED_LEFT; } static int is_expected_rev(const struct object_id *oid) { struct object_id expected_oid; if (refs_read_ref(get_main_ref_store(the_repository), "BISECT_EXPECTED_REV", &expected_oid)) return 0; return oideq(oid, &expected_oid); } enum bisect_error bisect_checkout(const struct object_id *bisect_rev, int no_checkout) { struct commit *commit; struct pretty_print_context pp = {0}; struct strbuf commit_msg = STRBUF_INIT; refs_update_ref(get_main_ref_store(the_repository), NULL, "BISECT_EXPECTED_REV", bisect_rev, NULL, 0, UPDATE_REFS_DIE_ON_ERR); if (no_checkout) { refs_update_ref(get_main_ref_store(the_repository), NULL, "BISECT_HEAD", bisect_rev, NULL, 0, UPDATE_REFS_DIE_ON_ERR); } else { struct child_process cmd = CHILD_PROCESS_INIT; cmd.git_cmd = 1; strvec_pushl(&cmd.args, "checkout", "-q", oid_to_hex(bisect_rev), "--", NULL); if (run_command(&cmd)) /* * Errors in `run_command()` itself, signaled by res < 0, * and errors in the child process, signaled by res > 0 * can both be treated as regular BISECT_FAILED (-1). 
*/ return BISECT_FAILED; } commit = lookup_commit_reference(the_repository, bisect_rev); repo_format_commit_message(the_repository, commit, "[%H] %s%n", &commit_msg, &pp); fputs(commit_msg.buf, stdout); strbuf_release(&commit_msg); return BISECT_OK; } static struct commit *get_commit_reference(struct repository *r, const struct object_id *oid) { struct commit *c = lookup_commit_reference(r, oid); if (!c) die(_("Not a valid commit name %s"), oid_to_hex(oid)); return c; } static struct commit **get_bad_and_good_commits(struct repository *r, int *rev_nr) { struct commit **rev; int i, n = 0; ALLOC_ARRAY(rev, 1 + good_revs.nr); rev[n++] = get_commit_reference(r, current_bad_oid); for (i = 0; i < good_revs.nr; i++) rev[n++] = get_commit_reference(r, good_revs.oid + i); *rev_nr = n; return rev; } static enum bisect_error handle_bad_merge_base(void) { if (is_expected_rev(current_bad_oid)) { char *bad_hex = oid_to_hex(current_bad_oid); char *good_hex = join_oid_array_hex(&good_revs, ' '); if (!strcmp(term_bad, "bad") && !strcmp(term_good, "good")) { fprintf(stderr, _("The merge base %s is bad.\n" "This means the bug has been fixed " "between %s and [%s].\n"), bad_hex, bad_hex, good_hex); } else if (!strcmp(term_bad, "new") && !strcmp(term_good, "old")) { fprintf(stderr, _("The merge base %s is new.\n" "The property has changed " "between %s and [%s].\n"), bad_hex, bad_hex, good_hex); } else { fprintf(stderr, _("The merge base %s is %s.\n" "This means the first '%s' commit is " "between %s and [%s].\n"), bad_hex, term_bad, term_good, bad_hex, good_hex); } free(good_hex); return BISECT_MERGE_BASE_CHECK; } fprintf(stderr, _("Some %s revs are not ancestors of the %s rev.\n" "git bisect cannot work properly in this case.\n" "Maybe you mistook %s and %s revs?\n"), term_good, term_bad, term_good, term_bad); return BISECT_FAILED; } static void handle_skipped_merge_base(const struct object_id *mb) { char *mb_hex = oid_to_hex(mb); char *bad_hex = oid_to_hex(current_bad_oid); char 
*good_hex = join_oid_array_hex(&good_revs, ' '); warning(_("the merge base between %s and [%s] " "must be skipped.\n" "So we cannot be sure the first %s commit is " "between %s and %s.\n" "We continue anyway."), bad_hex, good_hex, term_bad, mb_hex, bad_hex); free(good_hex); } /* * "check_merge_bases" checks that merge bases are not "bad" (or "new"). * * - If one is "bad" (or "new"), it means the user assumed something wrong * and we must return error with a non 0 error code. * - If one is "good" (or "old"), that's good, we have nothing to do. * - If one is "skipped", we can't know but we should warn. * - If we don't know, we should check it out and ask the user to test. * - If a merge base must be tested, on success return * BISECT_INTERNAL_SUCCESS_MERGE_BASE (-11) a special condition * for early success, this will be converted back to 0 in * check_good_are_ancestors_of_bad(). */ static enum bisect_error check_merge_bases(int rev_nr, struct commit **rev, int no_checkout) { enum bisect_error res = BISECT_OK; struct commit_list *result = NULL; if (repo_get_merge_bases_many(the_repository, rev[0], rev_nr - 1, rev + 1, &result) < 0) exit(128); for (struct commit_list *l = result; l; l = l->next) { const struct object_id *mb = &l->item->object.oid; if (oideq(mb, current_bad_oid)) { res = handle_bad_merge_base(); break; } else if (0 <= oid_array_lookup(&good_revs, mb)) { continue; } else if (0 <= oid_array_lookup(&skipped_revs, mb)) { handle_skipped_merge_base(mb); } else { printf(_("Bisecting: a merge base must be tested\n")); res = bisect_checkout(mb, no_checkout); if (!res) /* indicate early success */ res = BISECT_INTERNAL_SUCCESS_MERGE_BASE; break; } } free_commit_list(result); return res; } static int check_ancestors(struct repository *r, int rev_nr, struct commit **rev, const char *prefix) { struct strvec rev_argv = STRVEC_INIT; struct rev_info revs; int res; bisect_rev_setup(r, &revs, &rev_argv, prefix, "^%s", "%s", 0); bisect_common(&revs); res = (revs.commits 
!= NULL); /* Clean up objects used, as they will be reused. */ clear_commit_marks_many(rev_nr, rev, ALL_REV_FLAGS); release_revisions(&revs); strvec_clear(&rev_argv); return res; } /* * "check_good_are_ancestors_of_bad" checks that all "good" revs are * ancestor of the "bad" rev. * * If that's not the case, we need to check the merge bases. * If a merge base must be tested by the user, its source code will be * checked out to be tested by the user and we will return. */ static enum bisect_error check_good_are_ancestors_of_bad(struct repository *r, const char *prefix, int no_checkout) { char *filename; struct stat st; int fd, rev_nr; enum bisect_error res = BISECT_OK; struct commit **rev; if (!current_bad_oid) return error(_("a %s revision is needed"), term_bad); filename = git_pathdup("BISECT_ANCESTORS_OK"); /* Check if file BISECT_ANCESTORS_OK exists. */ if (!stat(filename, &st) && S_ISREG(st.st_mode)) goto done; /* Bisecting with no good rev is ok. */ if (!good_revs.nr) goto done; /* Check if all good revs are ancestor of the bad rev. */ rev = get_bad_and_good_commits(r, &rev_nr); if (check_ancestors(r, rev_nr, rev, prefix)) res = check_merge_bases(rev_nr, rev, no_checkout); free(rev); if (!res) { /* Create file BISECT_ANCESTORS_OK. */ fd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (fd < 0) /* * BISECT_ANCESTORS_OK file is not absolutely necessary, * the bisection process will continue at the next * bisection step. * So, just signal with a warning that something * might be wrong. */ warning_errno(_("could not create file '%s'"), filename); else close(fd); } done: free(filename); return res; } /* * Display a commit summary to the user. 
*/ static void show_commit(struct commit *commit) { struct child_process show = CHILD_PROCESS_INIT; /* * Call git show with --no-pager, as it would otherwise * paginate the "git show" output only, not the output * from bisect_next_all(); this can be fixed by moving * it into a --format parameter, but that would override * the user's default options for "git show", which we * are trying to honour. */ strvec_pushl(&show.args, "--no-pager", "show", "--stat", "--summary", "--no-abbrev-commit", "--diff-merges=first-parent", oid_to_hex(&commit->object.oid), NULL); show.git_cmd = 1; if (run_command(&show)) die(_("unable to start 'show' for object '%s'"), oid_to_hex(&commit->object.oid)); } /* * The terms used for this bisect session are stored in BISECT_TERMS. * We read them and store them to adapt the messages accordingly. * Default is bad/good. */ void read_bisect_terms(char **read_bad, char **read_good) { struct strbuf str = STRBUF_INIT; const char *filename = git_path_bisect_terms(); FILE *fp = fopen(filename, "r"); if (!fp) { if (errno == ENOENT) { free(*read_bad); *read_bad = xstrdup("bad"); free(*read_good); *read_good = xstrdup("good"); return; } else { die_errno(_("could not read file '%s'"), filename); } } else { strbuf_getline_lf(&str, fp); free(*read_bad); *read_bad = strbuf_detach(&str, NULL); strbuf_getline_lf(&str, fp); free(*read_good); *read_good = strbuf_detach(&str, NULL); } strbuf_release(&str); fclose(fp); } /* * We use the convention that return BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND (-10) means * the bisection process finished successfully. * In this case the calling function or command should not turn a * BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND return code into an error or a non zero exit code. * * Checking BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND * in bisect_helper::bisect_next() and only transforming it to 0 at * the end of bisect_helper::cmd_bisect__helper() helps bypassing * all the code related to finding a commit to test. 
*/ enum bisect_error bisect_next_all(struct repository *r, const char *prefix) { struct strvec rev_argv = STRVEC_INIT; struct rev_info revs = REV_INFO_INIT; struct commit_list *tried = NULL; int reaches = 0, all = 0, nr, steps; enum bisect_error res = BISECT_OK; struct object_id *bisect_rev; char *steps_msg; /* * If no_checkout is non-zero, the bisection process does not * checkout the trial commit but instead simply updates BISECT_HEAD. */ int no_checkout = refs_ref_exists(get_main_ref_store(the_repository), "BISECT_HEAD"); unsigned bisect_flags = 0; read_bisect_terms(&term_bad, &term_good); if (read_bisect_refs()) die(_("reading bisect refs failed")); if (file_exists(git_path_bisect_first_parent())) bisect_flags |= FIND_BISECTION_FIRST_PARENT_ONLY; if (skipped_revs.nr) bisect_flags |= FIND_BISECTION_ALL; res = check_good_are_ancestors_of_bad(r, prefix, no_checkout); if (res) goto cleanup; bisect_rev_setup(r, &revs, &rev_argv, prefix, "%s", "^%s", 1); revs.first_parent_only = !!(bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY); revs.limited = 1; bisect_common(&revs); find_bisection(&revs.commits, &reaches, &all, bisect_flags); revs.commits = managed_skipped(revs.commits, &tried); if (!revs.commits) { /* * We should return error here only if the "bad" * commit is also a "skip" commit. */ res = error_if_skipped_commits(tried, NULL); if (res < 0) goto cleanup; printf(_("%s was both %s and %s\n"), oid_to_hex(current_bad_oid), term_good, term_bad); res = BISECT_FAILED; goto cleanup; } if (!all) { fprintf(stderr, _("No testable commit found.\n" "Maybe you started with bad path arguments?\n")); res = BISECT_NO_TESTABLE_COMMIT; goto cleanup; } bisect_rev = &revs.commits->item->object.oid; if (oideq(bisect_rev, current_bad_oid)) { res = error_if_skipped_commits(tried, current_bad_oid); if (res) goto cleanup; printf("%s is the first %s commit\n", oid_to_hex(bisect_rev), term_bad); show_commit(revs.commits->item); /* * This means the bisection process succeeded. 
* Using BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND (-10) * so that the call chain can simply check * for negative return values for early returns up * until the cmd_bisect__helper() caller. */ res = BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND; goto cleanup; } nr = all - reaches - 1; steps = estimate_bisect_steps(all); steps_msg = xstrfmt(Q_("(roughly %d step)", "(roughly %d steps)", steps), steps); /* * TRANSLATORS: the last %s will be replaced with "(roughly %d * steps)" translation. */ printf(Q_("Bisecting: %d revision left to test after this %s\n", "Bisecting: %d revisions left to test after this %s\n", nr), nr, steps_msg); free(steps_msg); /* Clean up objects used, as they will be reused. */ repo_clear_commit_marks(r, ALL_REV_FLAGS); res = bisect_checkout(bisect_rev, no_checkout); cleanup: free_commit_list(tried); release_revisions(&revs); strvec_clear(&rev_argv); return res; } static inline int exp2i(int n) { return 1 << n; } /* * Estimate the number of bisect steps left (after the current step) * * For any x between 0 included and 2^n excluded, the probability for * n - 1 steps left looks like: * * P(2^n + x) == (2^n - x) / (2^n + x) * * and P(2^n + x) < 0.5 means 2^n < 3x */ int estimate_bisect_steps(int all) { int n, x, e; if (all < 3) return 0; n = log2u(all); e = exp2i(n); x = all - e; return (e < 3 * x) ? 
n : n - 1; } static int mark_for_removal(const char *refname, const char *referent UNUSED, const struct object_id *oid UNUSED, int flag UNUSED, void *cb_data) { struct string_list *refs = cb_data; char *ref = xstrfmt("refs/bisect%s", refname); string_list_append(refs, ref); return 0; } int bisect_clean_state(void) { int result = 0; /* There may be some refs packed during bisection */ struct string_list refs_for_removal = STRING_LIST_INIT_NODUP; refs_for_each_ref_in(get_main_ref_store(the_repository), "refs/bisect", mark_for_removal, (void *) &refs_for_removal); string_list_append(&refs_for_removal, xstrdup("BISECT_HEAD")); string_list_append(&refs_for_removal, xstrdup("BISECT_EXPECTED_REV")); result = refs_delete_refs(get_main_ref_store(the_repository), "bisect: remove", &refs_for_removal, REF_NO_DEREF); refs_for_removal.strdup_strings = 1; string_list_clear(&refs_for_removal, 0); unlink_or_warn(git_path_bisect_ancestors_ok()); unlink_or_warn(git_path_bisect_log()); unlink_or_warn(git_path_bisect_names()); unlink_or_warn(git_path_bisect_run()); unlink_or_warn(git_path_bisect_terms()); unlink_or_warn(git_path_bisect_first_parent()); /* * Cleanup BISECT_START last to support the --no-checkout option * introduced in the commit 4796e823a. */ unlink_or_warn(git_path_bisect_start()); return result; } git-cinnabar-0.7.0/git-core/bisect.h000064400000000000000000000046661046102023000153350ustar 00000000000000#ifndef BISECT_H #define BISECT_H struct commit_list; struct repository; struct object_id; /* * Find bisection. If something is found, `reaches` will be the number of * commits that the best commit reaches. `all` will be the count of * non-SAMETREE commits. If nothing is found, `list` will be NULL. * Otherwise, it will be either all non-SAMETREE commits or the single * best commit, as chosen by `find_all`. 
*/ void find_bisection(struct commit_list **list, int *reaches, int *all, unsigned bisect_flags); struct commit_list *filter_skipped(struct commit_list *list, struct commit_list **tried, int show_all, int *count, int *skipped_first); #define BISECT_SHOW_ALL (1<<0) #define REV_LIST_QUIET (1<<1) #define FIND_BISECTION_ALL (1u<<0) #define FIND_BISECTION_FIRST_PARENT_ONLY (1u<<1) struct rev_list_info { struct rev_info *revs; int flags; int show_timestamp; int hdr_termination; const char *header_prefix; }; /* * enum bisect_error represents the following return codes: * BISECT_OK: success code. Internally, it means that next * commit has been found (and possibly checked out) and it * should be tested. * BISECT_FAILED error code: default error code. * BISECT_ONLY_SKIPPED_LEFT error code: only skipped * commits left to be tested. * BISECT_MERGE_BASE_CHECK error code: merge base check failed. * BISECT_NO_TESTABLE_COMMIT error code: no testable commit found. * BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND early success code: * first term_bad commit found. * BISECT_INTERNAL_SUCCESS_MERGE_BASE early success * code: found merge base that should be tested. * Early success codes BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND and * BISECT_INTERNAL_SUCCESS_MERGE_BASE should be only internal codes. */ enum bisect_error { BISECT_OK = 0, BISECT_FAILED = -1, BISECT_ONLY_SKIPPED_LEFT = -2, BISECT_MERGE_BASE_CHECK = -3, BISECT_NO_TESTABLE_COMMIT = -4, BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND = -10, BISECT_INTERNAL_SUCCESS_MERGE_BASE = -11 }; /* * Stores how many good/bad commits we have stored for a bisect. nr_bad can * only be 0 or 1. 
*/ struct bisect_state { unsigned int nr_good; unsigned int nr_bad; }; enum bisect_error bisect_next_all(struct repository *r, const char *prefix); int estimate_bisect_steps(int all); void read_bisect_terms(char **bad, char **good); int bisect_clean_state(void); enum bisect_error bisect_checkout(const struct object_id *bisect_rev, int no_checkout); #endif git-cinnabar-0.7.0/git-core/blame.c000064400000000000000000002504301046102023000151270ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "refs.h" #include "object-store-ll.h" #include "cache-tree.h" #include "mergesort.h" #include "commit.h" #include "convert.h" #include "diff.h" #include "diffcore.h" #include "gettext.h" #include "hex.h" #include "path.h" #include "read-cache.h" #include "revision.h" #include "setup.h" #include "tag.h" #include "trace2.h" #include "blame.h" #include "alloc.h" #include "commit-slab.h" #include "bloom.h" #include "commit-graph.h" define_commit_slab(blame_suspects, struct blame_origin *); static struct blame_suspects blame_suspects; struct blame_origin *get_blame_suspects(struct commit *commit) { struct blame_origin **result; result = blame_suspects_peek(&blame_suspects, commit); return result ? *result : NULL; } static void set_blame_suspects(struct commit *commit, struct blame_origin *origin) { *blame_suspects_at(&blame_suspects, commit) = origin; } void blame_origin_decref(struct blame_origin *o) { if (o && --o->refcnt <= 0) { struct blame_origin *p, *l = NULL; if (o->previous) blame_origin_decref(o->previous); free(o->file.ptr); /* Should be present exactly once in commit chain */ for (p = get_blame_suspects(o->commit); p; l = p, p = p->next) { if (p == o) { if (l) l->next = p->next; else set_blame_suspects(o->commit, p->next); free(o); return; } } die("internal error in blame_origin_decref"); } } /* * Given a commit and a path in it, create a new origin structure. 
 * The callers that add blame to the scoreboard should use
 * get_origin() to obtain shared, refcounted copy instead of calling
 * this function directly.
 */
static struct blame_origin *make_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o;
	FLEX_ALLOC_STR(o, path, path);
	o->commit = commit;
	o->refcnt = 1;
	/* prepend to the commit's suspect list */
	o->next = get_blame_suspects(commit);
	set_blame_suspects(commit, o);
	return o;
}

/*
 * Locate an existing origin or create a new one.
 * This moves the origin to front position in the commit util list.
 */
static struct blame_origin *get_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o, *l;

	for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
		if (!strcmp(o->path, path)) {
			/* bump to front */
			if (l) {
				l->next = o->next;
				o->next = get_blame_suspects(commit);
				set_blame_suspects(commit, o);
			}
			return blame_origin_incref(o);
		}
	}
	return make_origin(commit, path);
}

/*
 * Sanity-check that "path" makes sense to blame against the work tree:
 * it must exist as a blob in at least one parent of the fake work-tree
 * commit, or be present (possibly unmerged) in the index; otherwise die.
 */
static void verify_working_tree_path(struct repository *r,
				     struct commit *work_tree, const char *path)
{
	struct commit_list *parents;
	int pos;

	for (parents = work_tree->parents; parents; parents = parents->next) {
		const struct object_id *commit_oid = &parents->item->object.oid;
		struct object_id blob_oid;
		unsigned short mode;

		if (!get_tree_entry(r, commit_oid, path, &blob_oid, &mode) &&
		    oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB)
			return;
	}

	/* a negative return is -1 - (position where it would be inserted) */
	pos = index_name_pos(r->index, path, strlen(path));
	if (pos >= 0)
		; /* path is in the index */
	else if (-1 - pos < r->index->cache_nr &&
		 !strcmp(r->index->cache[-1 - pos]->name, path))
		; /* path is in the index, unmerged */
	else
		die("no such path '%s' in HEAD", path);
}

/*
 * Append the commit named by "oid" to the list at "tail"; dies if the
 * oid does not resolve to a commit. Returns the new tail.
 */
static struct commit_list **append_parent(struct repository *r,
					  struct commit_list **tail,
					  const struct object_id *oid)
{
	struct commit *parent;

	parent = lookup_commit_reference(r, oid);
	if (!parent)
		die("no such commit %s", oid_to_hex(oid));
	return &commit_list_insert(parent, tail)->next;
}

/*
 * During a merge in progress, MERGE_HEAD lists the commits being
 * merged; append each of them as additional parents. A missing
 * MERGE_HEAD (ENOENT) simply means no merge is in progress.
 */
static void append_merge_parents(struct repository *r,
				 struct commit_list **tail)
{
	int merge_head;
	struct strbuf line = STRBUF_INIT;

	merge_head = open(git_path_merge_head(r), O_RDONLY);
	if (merge_head < 0) {
		if (errno == ENOENT)
			return;
		die("cannot open '%s' for reading",
		    git_path_merge_head(r));
	}

	while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
		struct object_id oid;
		if (get_oid_hex(line.buf, &oid))
			die("unknown line in '%s': %s",
			    git_path_merge_head(r), line.buf);
		tail = append_parent(r, tail, &oid);
	}
	close(merge_head);
	strbuf_release(&line);
}

/*
 * This isn't as simple as passing sb->buf and sb->len, because we
 * want to transfer ownership of the buffer to the commit (so we
 * must use detach).
 */
static void set_commit_buffer_from_strbuf(struct repository *r,
					  struct commit *c,
					  struct strbuf *sb)
{
	size_t len;
	void *buf = strbuf_detach(sb, &len);
	set_commit_buffer(r, c, buf, len);
}

/*
 * Prepare a dummy commit that represents the work tree (or staged) item.
 * Note that annotating work tree item never works in the reverse.
 */
static struct commit *fake_working_tree_commit(struct repository *r,
					       struct diff_options *opt,
					       const char *path,
					       const char *contents_from,
					       struct object_id *oid)
{
	struct commit *commit;
	struct blame_origin *origin;
	struct commit_list **parent_tail, *parent;
	struct strbuf buf = STRBUF_INIT;
	const char *ident;
	time_t now;
	int len;
	struct cache_entry *ce;
	unsigned mode;
	struct strbuf msg = STRBUF_INIT;

	repo_read_index(r);
	time(&now);
	commit = alloc_commit_node(r);
	commit->object.parsed = 1;
	commit->date = now;
	parent_tail = &commit->parents;

	/* HEAD first, then any MERGE_HEAD commits */
	parent_tail = append_parent(r, parent_tail, oid);
	append_merge_parents(r, parent_tail);
	verify_working_tree_path(r, commit, path);

	origin = make_origin(commit, path);

	if (contents_from)
		ident = fmt_ident("External file (--contents)", "external.file",
				  WANT_BLANK_IDENT, NULL, 0);
	else
		ident = fmt_ident("Not Committed Yet", "not.committed.yet",
				  WANT_BLANK_IDENT, NULL, 0);
	/* synthesize a commit buffer; the tree oid is a dummy */
	strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
	for (parent = commit->parents; parent; parent = parent->next)
		strbuf_addf(&msg, "parent %s\n",
			    oid_to_hex(&parent->item->object.oid));
	strbuf_addf(&msg,
		    "author %s\n"
		    "committer %s\n\n"
		    "Version of %s from %s\n",
		    ident, ident, path,
		    (!contents_from ? path :
		     (!strcmp(contents_from, "-") ? "standard input" : contents_from)));
	set_commit_buffer_from_strbuf(r, commit, &msg);

	if (!contents_from || strcmp("-", contents_from)) {
		struct stat st;
		const char *read_from;
		char *buf_ptr;
		unsigned long buf_len;

		if (contents_from) {
			if (stat(contents_from, &st) < 0)
				die_errno("Cannot stat '%s'", contents_from);
			read_from = contents_from;
		}
		else {
			if (lstat(path, &st) < 0)
				die_errno("Cannot lstat '%s'", path);
			read_from = path;
		}
		mode = canon_mode(st.st_mode);

		switch (st.st_mode & S_IFMT) {
		case S_IFREG:
			if (opt->flags.allow_textconv &&
			    textconv_object(r, read_from, mode, null_oid(), 0, &buf_ptr, &buf_len))
				strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
			else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
				die_errno("cannot open or read '%s'", read_from);
			break;
		case S_IFLNK:
			if (strbuf_readlink(&buf, read_from, st.st_size) < 0)
				die_errno("cannot readlink '%s'", read_from);
			break;
		default:
			die("unsupported file type %s", read_from);
		}
	}
	else {
		/* Reading from stdin */
		mode = 0;
		if (strbuf_read(&buf, 0, 0) < 0)
			die_errno("failed to read from stdin");
	}
	convert_to_git(r->index, path, buf.buf, buf.len, &buf, 0);
	origin->file.ptr = buf.buf;
	origin->file.size = buf.len;
	pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);

	/*
	 * Read the current index, replace the path entry with
	 * origin->blob_sha1 without mucking with its mode or type
	 * bits; we are not going to write this index out -- we just
	 * want to run "diff-index --cached".
	 */
	discard_index(r->index);
	repo_read_index(r);

	len = strlen(path);
	if (!mode) {
		int pos = index_name_pos(r->index, path, len);
		if (0 <= pos)
			mode = r->index->cache[pos]->ce_mode;
		else
			/* Let's not bother reading from HEAD tree */
			mode = S_IFREG | 0644;
	}
	ce = make_empty_cache_entry(r->index, len);
	oidcpy(&ce->oid, &origin->blob_oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);
	add_index_entry(r->index, ce,
			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);

	cache_tree_invalidate_path(r->index, path);

	return commit;
}

/*
 * Run an xdiff between the two files and feed each hunk to hunk_func;
 * cb_data is passed through as the callback's private data.
 */
static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
		      xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts)
{
	xpparam_t xpp = {0};
	xdemitconf_t xecfg = {0};
	xdemitcb_t ecb = {NULL};

	xpp.flags = xdl_opts;
	xecfg.hunk_func = hunk_func;
	ecb.priv = cb_data;
	return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb);
}

/*
 * Return a pointer just past the next newline in [start, end),
 * or "end" when there is no further newline.
 */
static const char *get_next_line(const char *start, const char *end)
{
	const char *nl = memchr(start, '\n', end - start);

	return nl ? nl + 1 : end;
}

/*
 * Store the byte offset of the start of each line of buf in a newly
 * allocated array (*line_starts), terminated by a sentinel entry equal
 * to len, and return the number of lines. Caller frees *line_starts.
 */
static int find_line_starts(int **line_starts, const char *buf,
			    unsigned long len)
{
	const char *end = buf + len;
	const char *p;
	int *lineno;
	int num = 0;

	for (p = buf; p < end; p = get_next_line(p, end))
		num++;

	ALLOC_ARRAY(*line_starts, num + 1);
	lineno = *line_starts;

	for (p = buf; p < end; p = get_next_line(p, end))
		*lineno++ = p - buf;

	*lineno = len;

	return num;
}

struct fingerprint_entry;

/* A fingerprint is intended to loosely represent a string, such that two
 * fingerprints can be quickly compared to give an indication of the similarity
 * of the strings that they represent.
 *
 * A fingerprint is represented as a multiset of the lower-cased byte pairs in
 * the string that it represents. Whitespace is added at each end of the
 * string. Whitespace pairs are ignored. Whitespace is converted to '\0'.
 * For example, the string "Darth Radar" will be converted to the following
 * fingerprint:
 * {"\0d", "da", "da", "ar", "ar", "rt", "th", "h\0", "\0r", "ra", "ad", "r\0"}
 *
 * The similarity between two fingerprints is the size of the intersection of
 * their multisets, including repeated elements. See fingerprint_similarity for
 * examples.
 *
 * For ease of implementation, the fingerprint is implemented as a map
 * of byte pairs to the count of that byte pair in the string, instead of
 * allowing repeated elements in a set.
 */
struct fingerprint {
	struct hashmap map;
	/* As we know the maximum number of entries in advance, it's
	 * convenient to store the entries in a single array instead of having
	 * the hashmap manage the memory.
	 */
	struct fingerprint_entry *entries;
};

/* A byte pair in a fingerprint. Stores the number of times the byte pair
 * occurs in the string that the fingerprint represents.
 */
struct fingerprint_entry {
	/* The hashmap entry - the hash represents the byte pair in its
	 * entirety so we don't need to store the byte pair separately.
	 */
	struct hashmap_entry entry;
	/* The number of times the byte pair occurs in the string that the
	 * fingerprint represents.
	 */
	int count;
};

/* See `struct fingerprint` for an explanation of what a fingerprint is.
 * \param result the fingerprint of the string is stored here. This must be
 *               freed later using free_fingerprint.
 * \param line_begin the start of the string
 * \param line_end the end of the string
 */
static void get_fingerprint(struct fingerprint *result,
			    const char *line_begin,
			    const char *line_end)
{
	unsigned int hash, c0 = 0, c1;
	const char *p;
	/* at most one entry per character, plus one for the trailing pair */
	int max_map_entry_count = 1 + line_end - line_begin;
	struct fingerprint_entry *entry = xcalloc(max_map_entry_count,
						  sizeof(struct fingerprint_entry));
	struct fingerprint_entry *found_entry;

	hashmap_init(&result->map, NULL, NULL, max_map_entry_count);
	result->entries = entry;
	for (p = line_begin; p <= line_end; ++p, c0 = c1) {
		/* Always terminate the string with whitespace.
		 * Normalise whitespace to 0, and normalise letters to
		 * lower case. This won't work for multibyte characters but at
		 * worst will match some unrelated characters.
		 */
		if ((p == line_end) || isspace(*p))
			c1 = 0;
		else
			c1 = tolower(*p);
		/* the hash encodes the byte pair itself */
		hash = c0 | (c1 << 8);
		/* Ignore whitespace pairs */
		if (hash == 0)
			continue;
		hashmap_entry_init(&entry->entry, hash);

		found_entry = hashmap_get_entry(&result->map, entry,
						/* member name */ entry, NULL);
		if (found_entry) {
			found_entry->count += 1;
		} else {
			entry->count = 1;
			hashmap_add(&result->map, &entry->entry);
			/* entry consumed; advance into the preallocated array */
			++entry;
		}
	}
}

static void free_fingerprint(struct fingerprint *f)
{
	hashmap_clear(&f->map);
	free(f->entries);
}

/* Calculates the similarity between two fingerprints as the size of the
 * intersection of their multisets, including repeated elements. See
 * `struct fingerprint` for an explanation of the fingerprint representation.
 * The similarity between "cat mat" and "father rather" is 2 because "at" is
 * present twice in both strings while the similarity between "tim" and "mit"
 * is 0.
 */
static int fingerprint_similarity(struct fingerprint *a, struct fingerprint *b)
{
	int intersection = 0;
	struct hashmap_iter iter;
	const struct fingerprint_entry *entry_a, *entry_b;

	hashmap_for_each_entry(&b->map, &iter, entry_b,
				entry /* member name */) {
		entry_a = hashmap_get_entry(&a->map, entry_b, entry, NULL);
		if (entry_a) {
			/* multiset intersection: take the smaller count */
			intersection += entry_a->count < entry_b->count ?
					entry_a->count : entry_b->count;
		}
	}
	return intersection;
}

/* Subtracts byte-pair elements in B from A, modifying A in place.
 */
static void fingerprint_subtract(struct fingerprint *a, struct fingerprint *b)
{
	struct hashmap_iter iter;
	struct fingerprint_entry *entry_a;
	const struct fingerprint_entry *entry_b;

	hashmap_iter_init(&b->map, &iter);

	hashmap_for_each_entry(&b->map, &iter, entry_b,
				entry /* member name */) {
		entry_a = hashmap_get_entry(&a->map, entry_b, entry, NULL);
		if (entry_a) {
			/* drop the pair entirely when B has at least as many */
			if (entry_a->count <= entry_b->count)
				hashmap_remove(&a->map, &entry_b->entry, NULL);
			else
				entry_a->count -= entry_b->count;
		}
	}
}

/* Calculate fingerprints for a series of lines.
 * Puts the fingerprints in the fingerprints array, which must have been
 * preallocated to allow storing line_count elements.
 */
static void get_line_fingerprints(struct fingerprint *fingerprints,
				  const char *content, const int *line_starts,
				  long first_line, long line_count)
{
	int i;
	const char *linestart, *lineend;

	line_starts += first_line;

	for (i = 0; i < line_count; ++i) {
		linestart = content + line_starts[i];
		lineend = content + line_starts[i + 1];
		get_fingerprint(fingerprints + i, linestart, lineend);
	}
}

/* Free the per-line fingerprints, but not the array itself. */
static void free_line_fingerprints(struct fingerprint *fingerprints,
				   int nr_fingerprints)
{
	int i;

	for (i = 0; i < nr_fingerprints; i++)
		free_fingerprint(&fingerprints[i]);
}

/* This contains the data necessary to linearly map a line number in one half
 * of a diff chunk to the line in the other half of the diff chunk that is
 * closest in terms of its position as a fraction of the length of the chunk.
 */
struct line_number_mapping {
	int destination_start, destination_length,
		source_start, source_length;
};

/* Given a line number in one range, offset and scale it to map it onto the
 * other range.
 * Essentially this mapping is a simple linear equation but the calculation is
 * more complicated to allow performing it with integer operations.
 * Another complication is that if a line could map onto many lines in the
 * destination range then we want to choose the line at the center of those
 * possibilities.
 * Example: if the chunk is 2 lines long in A and 10 lines long in B then the
 * first 5 lines in B will map onto the first line in the A chunk, while the
 * last 5 lines will all map onto the second line in the A chunk.
 * Example: if the chunk is 10 lines long in A and 2 lines long in B then line
 * 0 in B will map onto line 2 in A, and line 1 in B will map onto line 7 in A.
 */
static int map_line_number(int line_number,
		const struct line_number_mapping *mapping)
{
	/* "* 2 + 1" / "* 2" implements round-to-center in integer math */
	return ((line_number - mapping->source_start) * 2 + 1) *
	       mapping->destination_length /
	       (mapping->source_length * 2) +
	       mapping->destination_start;
}

/* Get a pointer to the element storing the similarity between a line in A
 * and a line in B.
 *
 * The similarities are stored in a 2-dimensional array. Each "row" in the
 * array contains the similarities for a line in B. The similarities stored in
 * a row are the similarities between the line in B and the nearby lines in A.
 * To keep the length of each row the same, it is padded out with values of -1
 * where the search range extends beyond the lines in A.
 * For example, if max_search_distance_a is 2 and the two sides of a diff chunk
 * look like this:
 * a | m
 * b | n
 * c | o
 * d | p
 * e | q
 * Then the similarity array will contain:
 * [-1, -1, am, bm, cm,
 *  -1, an, bn, cn, dn,
 *  ao, bo, co, do, eo,
 *  bp, cp, dp, ep, -1,
 *  cq, dq, eq, -1, -1]
 * Where similarities are denoted either by -1 for invalid, or the
 * concatenation of the two lines in the diff being compared.
 *
 * \param similarities array of similarities between lines in A and B
 * \param line_a the index of the line in A, in the same frame of reference as
 *               closest_line_a.
 * \param local_line_b the index of the line in B, relative to the first line
 *                     in B that similarities represents.
 * \param closest_line_a the index of the line in A that is deemed to be
 *                       closest to local_line_b. This must be in the same
 *                       frame of reference as line_a. This value defines
 *                       where similarities is centered for the line in B.
 * \param max_search_distance_a maximum distance in lines from the closest line
 *                              in A for other lines in A for which
 *                              similarities may be calculated.
 */
static int *get_similarity(int *similarities,
			   int line_a, int local_line_b,
			   int closest_line_a, int max_search_distance_a)
{
	assert(abs(line_a - closest_line_a) <=
	       max_search_distance_a);
	/* row = local_line_b, column = offset of line_a within the window */
	return similarities + line_a - closest_line_a +
		max_search_distance_a +
		local_line_b * (max_search_distance_a * 2 + 1);
}

/* Sentinel certainty values; real certainties are always >= 0. */
#define CERTAIN_NOTHING_MATCHES -2
#define CERTAINTY_NOT_CALCULATED -1

/* Given a line in B, first calculate its similarities with nearby lines in A
 * if not already calculated, then identify the most similar and second most
 * similar lines. The "certainty" is calculated based on those two
 * similarities.
 *
 * \param start_a the index of the first line of the chunk in A
 * \param length_a the length in lines of the chunk in A
 * \param local_line_b the index of the line in B, relative to the first line
 *                     in the chunk.
 * \param fingerprints_a array of fingerprints for the chunk in A
 * \param fingerprints_b array of fingerprints for the chunk in B
 * \param similarities 2-dimensional array of similarities between lines in A
 *                     and B. See get_similarity() for more details.
 * \param certainties array of values indicating how strongly a line in B is
 *                    matched with some line in A.
 * \param second_best_result array of absolute indices in A for the second
 *                           closest match of a line in B.
 * \param result array of absolute indices in A for the closest match of a line
 *               in B.
 * \param max_search_distance_a maximum distance in lines from the closest line
 *                              in A for other lines in A for which
 *                              similarities may be calculated.
 * \param map_line_number_in_b_to_a parameter to map_line_number().
 */
static void find_best_line_matches(
	int start_a,
	int length_a,
	int start_b,
	int local_line_b,
	struct fingerprint *fingerprints_a,
	struct fingerprint *fingerprints_b,
	int *similarities,
	int *certainties,
	int *second_best_result,
	int *result,
	const int max_search_distance_a,
	const struct line_number_mapping *map_line_number_in_b_to_a)
{

	int i, search_start, search_end, closest_local_line_a, *similarity,
		best_similarity = 0, second_best_similarity = 0,
		best_similarity_index = 0, second_best_similarity_index = 0;

	/* certainty has already been calculated so no need to redo the work */
	if (certainties[local_line_b] != CERTAINTY_NOT_CALCULATED)
		return;

	closest_local_line_a = map_line_number(
		local_line_b + start_b, map_line_number_in_b_to_a) - start_a;

	/* clamp the search window to [0, length_a) */
	search_start = closest_local_line_a - max_search_distance_a;
	if (search_start < 0)
		search_start = 0;

	search_end = closest_local_line_a + max_search_distance_a + 1;
	if (search_end > length_a)
		search_end = length_a;

	for (i = search_start; i < search_end; ++i) {
		similarity = get_similarity(similarities,
					    i, local_line_b,
					    closest_local_line_a,
					    max_search_distance_a);
		if (*similarity == -1) {
			/* This value will never exceed 10 but assert just in
			 * case
			 */
			assert(abs(i - closest_local_line_a) < 1000);
			/* scale the similarity by (1000 - distance from
			 * closest line) to act as a tie break between lines
			 * that otherwise are equally similar.
			 */
			*similarity = fingerprint_similarity(
				fingerprints_b + local_line_b,
				fingerprints_a + i) *
				(1000 - abs(i - closest_local_line_a));
		}
		if (*similarity > best_similarity) {
			second_best_similarity = best_similarity;
			second_best_similarity_index = best_similarity_index;
			best_similarity = *similarity;
			best_similarity_index = i;
		} else if (*similarity > second_best_similarity) {
			second_best_similarity = *similarity;
			second_best_similarity_index = i;
		}
	}

	if (best_similarity == 0) {
		/* this line definitely doesn't match with anything. Mark it
		 * with this special value so it doesn't get invalidated and
		 * won't be recalculated.
		 */
		certainties[local_line_b] = CERTAIN_NOTHING_MATCHES;
		result[local_line_b] = -1;
	} else {
		/* Calculate the certainty with which this line matches.
		 * If the line matches well with two lines then that reduces
		 * the certainty. However we still want to prioritise matching
		 * a line that matches very well with two lines over matching a
		 * line that matches poorly with one line, hence doubling
		 * best_similarity.
		 * This means that if we have
		 * line X that matches only one line with a score of 3,
		 * line Y that matches two lines equally with a score of 5,
		 * and line Z that matches only one line with a score or 2,
		 * then the lines in order of certainty are X, Y, Z.
		 */
		certainties[local_line_b] = best_similarity * 2 -
			second_best_similarity;

		/* We keep both the best and second best results to allow us to
		 * check at a later stage of the matching process whether the
		 * result needs to be invalidated.
		 */
		result[local_line_b] = start_a + best_similarity_index;
		second_best_result[local_line_b] =
			start_a + second_best_similarity_index;
	}
}

/*
 * This finds the line that we can match with the most confidence, and
 * uses it as a partition. It then calls itself on the lines on either side of
 * that partition. In this way we avoid lines appearing out of order, and
 * retain a sensible line ordering.
 * \param start_a index of the first line in A with which lines in B may be
 *                compared.
 * \param start_b index of the first line in B for which matching should be
 *                done.
 * \param length_a number of lines in A with which lines in B may be compared.
 * \param length_b number of lines in B for which matching should be done.
 * \param fingerprints_a mutable array of fingerprints in A. The first element
 *                       corresponds to the line at start_a.
 * \param fingerprints_b array of fingerprints in B. The first element
 *                       corresponds to the line at start_b.
 * \param similarities 2-dimensional array of similarities between lines in A
 *                     and B. See get_similarity() for more details.
 * \param certainties array of values indicating how strongly a line in B is
 *                    matched with some line in A.
 * \param second_best_result array of absolute indices in A for the second
 *                           closest match of a line in B.
 * \param result array of absolute indices in A for the closest match of a line
 *               in B.
 * \param max_search_distance_a maximum distance in lines from the closest line
 *                              in A for other lines in A for which
 *                              similarities may be calculated.
 * \param max_search_distance_b an upper bound on the greatest possible
 *                              distance between lines in B such that they will
 *                              both be compared with the same line in A
 *                              according to max_search_distance_a.
 * \param map_line_number_in_b_to_a parameter to map_line_number().
 */
static void fuzzy_find_matching_lines_recurse(
	int start_a, int start_b,
	int length_a, int length_b,
	struct fingerprint *fingerprints_a,
	struct fingerprint *fingerprints_b,
	int *similarities,
	int *certainties,
	int *second_best_result,
	int *result,
	int max_search_distance_a,
	int max_search_distance_b,
	const struct line_number_mapping *map_line_number_in_b_to_a)
{
	int i, invalidate_min, invalidate_max, offset_b,
		second_half_start_a, second_half_start_b,
		second_half_length_a, second_half_length_b,
		most_certain_line_a, most_certain_local_line_b = -1,
		most_certain_line_certainty = -1,
		closest_local_line_a;

	/* match each line in B and remember the most certain match */
	for (i = 0; i < length_b; ++i) {
		find_best_line_matches(start_a,
				       length_a,
				       start_b,
				       i,
				       fingerprints_a,
				       fingerprints_b,
				       similarities,
				       certainties,
				       second_best_result,
				       result,
				       max_search_distance_a,
				       map_line_number_in_b_to_a);

		if (certainties[i] > most_certain_line_certainty) {
			most_certain_line_certainty = certainties[i];
			most_certain_local_line_b = i;
		}
	}

	/* No matches. */
	if (most_certain_local_line_b == -1)
		return;

	most_certain_line_a = result[most_certain_local_line_b];

	/*
	 * Subtract the most certain line's fingerprint in B from the matched
	 * fingerprint in A. This means that other lines in B can't also match
	 * the same parts of the line in A.
	 */
	fingerprint_subtract(fingerprints_a + most_certain_line_a - start_a,
			     fingerprints_b + most_certain_local_line_b);

	/* Invalidate results that may be affected by the choice of most
	 * certain line.
	 */
	invalidate_min = most_certain_local_line_b - max_search_distance_b;
	invalidate_max = most_certain_local_line_b +
		max_search_distance_b + 1;
	if (invalidate_min < 0)
		invalidate_min = 0;
	if (invalidate_max > length_b)
		invalidate_max = length_b;

	/* As the fingerprint in A has changed, discard previously calculated
	 * similarity values with that fingerprint.
	 */
	for (i = invalidate_min; i < invalidate_max; ++i) {
		closest_local_line_a = map_line_number(
			i + start_b, map_line_number_in_b_to_a) - start_a;

		/* Check that the lines in A and B are close enough that there
		 * is a similarity value for them.
		 */
		if (abs(most_certain_line_a - start_a - closest_local_line_a) >
			max_search_distance_a) {
			continue;
		}

		*get_similarity(similarities, most_certain_line_a - start_a,
				i, closest_local_line_a,
				max_search_distance_a) = -1;
	}

	/* More invalidating of results that may be affected by the choice of
	 * most certain line.
	 * Discard the matches for lines in B that are currently matched with a
	 * line in A such that their ordering contradicts the ordering imposed
	 * by the choice of most certain line.
	 */
	for (i = most_certain_local_line_b - 1; i >= invalidate_min; --i) {
		/* In this loop we discard results for lines in B that are
		 * before most-certain-line-B but are matched with a line in A
		 * that is after most-certain-line-A.
		 */
		if (certainties[i] >= 0 &&
		    (result[i] >= most_certain_line_a ||
		     second_best_result[i] >= most_certain_line_a)) {
			certainties[i] = CERTAINTY_NOT_CALCULATED;
		}
	}
	for (i = most_certain_local_line_b + 1; i < invalidate_max; ++i) {
		/* In this loop we discard results for lines in B that are
		 * after most-certain-line-B but are matched with a line in A
		 * that is before most-certain-line-A.
		 */
		if (certainties[i] >= 0 &&
		    (result[i] <= most_certain_line_a ||
		     second_best_result[i] <= most_certain_line_a)) {
			certainties[i] = CERTAINTY_NOT_CALCULATED;
		}
	}

	/* Repeat the matching process for lines before the most certain line.
	 */
	if (most_certain_local_line_b > 0) {
		fuzzy_find_matching_lines_recurse(
			start_a, start_b,
			most_certain_line_a + 1 - start_a,
			most_certain_local_line_b,
			fingerprints_a, fingerprints_b, similarities,
			certainties, second_best_result, result,
			max_search_distance_a,
			max_search_distance_b,
			map_line_number_in_b_to_a);
	}
	/* Repeat the matching process for lines after the most certain line.
	 */
	if (most_certain_local_line_b + 1 < length_b) {
		second_half_start_a = most_certain_line_a;
		offset_b = most_certain_local_line_b + 1;
		second_half_start_b = start_b + offset_b;
		second_half_length_a =
			length_a + start_a - second_half_start_a;
		second_half_length_b =
			length_b + start_b - second_half_start_b;
		fuzzy_find_matching_lines_recurse(
			second_half_start_a, second_half_start_b,
			second_half_length_a, second_half_length_b,
			fingerprints_a + second_half_start_a - start_a,
			fingerprints_b + offset_b,
			similarities +
				offset_b * (max_search_distance_a * 2 + 1),
			certainties + offset_b,
			second_best_result + offset_b, result + offset_b,
			max_search_distance_a,
			max_search_distance_b,
			map_line_number_in_b_to_a);
	}
}

/* Find the lines in the parent line range that most closely match the lines in
 * the target line range. This is accomplished by matching fingerprints in each
 * blame_origin, and choosing the best matches that preserve the line ordering.
 * See struct fingerprint for details of fingerprint matching, and
 * fuzzy_find_matching_lines_recurse for details of preserving line ordering.
 *
 * The performance is believed to be O(n log n) in the typical case and O(n^2)
 * in a pathological case, where n is the number of lines in the target range.
 *
 * Returns a newly allocated array of length (same - tlno), mapping each
 * target line to an absolute parent line index or -1 for no match;
 * ownership of the array passes to the caller. Returns NULL when the
 * parent range is empty.
 */
static int *fuzzy_find_matching_lines(struct blame_origin *parent,
				      struct blame_origin *target,
				      int tlno, int parent_slno, int same,
				      int parent_len)
{
	/* We use the terminology "A" for the left hand side of the diff AKA
	 * parent, and "B" for the right hand side of the diff AKA target. */
	int start_a = parent_slno;
	int length_a = parent_len;
	int start_b = tlno;
	int length_b = same - tlno;

	struct line_number_mapping map_line_number_in_b_to_a = {
		start_a, length_a, start_b, length_b
	};

	struct fingerprint *fingerprints_a = parent->fingerprints;
	struct fingerprint *fingerprints_b = target->fingerprints;

	int i, *result, *second_best_result,
		*certainties, *similarities, similarity_count;

	/*
	 * max_search_distance_a means that given a line in B, compare it to
	 * the line in A that is closest to its position, and the lines in A
	 * that are no greater than max_search_distance_a lines away from the
	 * closest line in A.
	 *
	 * max_search_distance_b is an upper bound on the greatest possible
	 * distance between lines in B such that they will both be compared
	 * with the same line in A according to max_search_distance_a.
	 */
	int max_search_distance_a = 10, max_search_distance_b;

	if (length_a <= 0)
		return NULL;

	if (max_search_distance_a >= length_a)
		max_search_distance_a = length_a ? length_a - 1 : 0;

	max_search_distance_b = ((2 * max_search_distance_a + 1) * length_b
				 - 1) / length_a;

	CALLOC_ARRAY(result, length_b);
	CALLOC_ARRAY(second_best_result, length_b);
	CALLOC_ARRAY(certainties, length_b);

	/* See get_similarity() for details of similarities.
	 */
	similarity_count = length_b * (max_search_distance_a * 2 + 1);
	CALLOC_ARRAY(similarities, similarity_count);

	for (i = 0; i < length_b; ++i) {
		result[i] = -1;
		second_best_result[i] = -1;
		certainties[i] = CERTAINTY_NOT_CALCULATED;
	}

	for (i = 0; i < similarity_count; ++i)
		similarities[i] = -1;

	fuzzy_find_matching_lines_recurse(start_a, start_b,
					  length_a, length_b,
					  fingerprints_a + start_a,
					  fingerprints_b + start_b,
					  similarities,
					  certainties,
					  second_best_result,
					  result,
					  max_search_distance_a,
					  max_search_distance_b,
					  &map_line_number_in_b_to_a);

	free(similarities);
	free(certainties);
	free(second_best_result);

	return result;
}

/* Lazily compute and cache the per-line fingerprints of an origin's file. */
static void fill_origin_fingerprints(struct blame_origin *o)
{
	int *line_starts;

	if (o->fingerprints)
		return;
	o->num_lines = find_line_starts(&line_starts, o->file.ptr,
					o->file.size);
	CALLOC_ARRAY(o->fingerprints, o->num_lines);
	get_line_fingerprints(o->fingerprints, o->file.ptr, line_starts,
			      0, o->num_lines);
	free(line_starts);
}

static void drop_origin_fingerprints(struct blame_origin *o)
{
	if (o->fingerprints) {
		free_line_fingerprints(o->fingerprints, o->num_lines);
		o->num_lines = 0;
		FREE_AND_NULL(o->fingerprints);
	}
}

/*
 * Given an origin, prepare mmfile_t structure to be used by the
 * diff machinery
 */
static void fill_origin_blob(struct diff_options *opt,
			     struct blame_origin *o, mmfile_t *file,
			     int *num_read_blob, int fill_fingerprints)
{
	if (!o->file.ptr) {
		enum object_type type;
		unsigned long file_size;

		(*num_read_blob)++;
		/* textconv takes precedence when enabled and applicable */
		if (opt->flags.allow_textconv &&
		    textconv_object(opt->repo, o->path, o->mode,
				    &o->blob_oid, 1, &file->ptr, &file_size))
			;
		else
			file->ptr = repo_read_object_file(the_repository,
							  &o->blob_oid, &type,
							  &file_size);
		file->size = file_size;

		if (!file->ptr)
			die("Cannot read blob %s for path %s",
			    oid_to_hex(&o->blob_oid),
			    o->path);
		/* cache the contents on the origin for later reuse */
		o->file = *file;
	}
	else
		*file = o->file;
	if (fill_fingerprints)
		fill_origin_fingerprints(o);
}

static void drop_origin_blob(struct blame_origin *o)
{
	FREE_AND_NULL(o->file.ptr);
	drop_origin_fingerprints(o);
}

/*
 * Any merge of blames happens on lists of blames that arrived via
 * different parents in a single suspect. In this case, we want to
 * sort according to the suspect line numbers as opposed to the final
 * image line numbers. The function body is somewhat longish because
 * it avoids unnecessary writes.
 */
static struct blame_entry *blame_merge(struct blame_entry *list1,
				       struct blame_entry *list2)
{
	struct blame_entry *p1 = list1, *p2 = list2,
		**tail = &list1;

	if (!p1)
		return p2;
	if (!p2)
		return p1;

	/* advance down list1 while it leads, then alternate runs */
	if (p1->s_lno <= p2->s_lno) {
		do {
			tail = &p1->next;
			if (!(p1 = *tail)) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
	for (;;) {
		*tail = p2;
		do {
			tail = &p2->next;
			if (!(p2 = *tail))  {
				*tail = p1;
				return list1;
			}
		} while (p1->s_lno > p2->s_lno);
		*tail = p1;
		do {
			tail = &p1->next;
			if (!(p1 = *tail)) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
}

/* Generates sort_blame_entries(), a list merge sort over the "next" links. */
DEFINE_LIST_SORT(static, sort_blame_entries, struct blame_entry, next);

/*
 * Final image line numbers are all different, so we don't need a
 * three-way comparison here.
 */
static int compare_blame_final(const struct blame_entry *e1,
			       const struct blame_entry *e2)
{
	return e1->lno > e2->lno ? 1 : -1;
}

static int compare_blame_suspect(const struct blame_entry *s1,
				 const struct blame_entry *s2)
{
	/*
	 * to allow for collating suspects, we sort according to the
	 * respective pointer value as the primary sorting criterion.
	 * The actual relation is pretty unimportant as long as it
	 * establishes a total order. Comparing as integers gives us
	 * that.
	 */
	if (s1->suspect != s2->suspect)
		return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
	if (s1->s_lno == s2->s_lno)
		return 0;
	return s1->s_lno > s2->s_lno ? 1 : -1;
}

void blame_sort_final(struct blame_scoreboard *sb)
{
	sort_blame_entries(&sb->ent, compare_blame_final);
}

static int compare_commits_by_reverse_commit_date(const void *a,
						  const void *b,
						  void *c)
{
	return -compare_commits_by_commit_date(a, b, c);
}

/*
 * For debugging -- origin is refcounted, and this asserts that
 * we do not underflow.
 */
static void sanity_check_refcnt(struct blame_scoreboard *sb)
{
	int baa = 0;
	struct blame_entry *ent;

	for (ent = sb->ent; ent; ent = ent->next) {
		/* Nobody should have zero or negative refcnt */
		if (ent->suspect->refcnt <= 0) {
			fprintf(stderr, "%s in %s has negative refcnt %d\n",
				ent->suspect->path,
				oid_to_hex(&ent->suspect->commit->object.oid),
				ent->suspect->refcnt);
			baa = 1;
		}
	}
	if (baa)
		sb->on_sanity_fail(sb, baa);
}

/*
 * If two blame entries that are next to each other came from
 * contiguous lines in the same origin (i.e. <commit, path> pair),
 * merge them together.
 */
void blame_coalesce(struct blame_scoreboard *sb)
{
	struct blame_entry *ent, *next;

	for (ent = sb->ent; ent && (next = ent->next); ent = next) {
		if (ent->suspect == next->suspect &&
		    ent->s_lno + ent->num_lines == next->s_lno &&
		    ent->lno + ent->num_lines == next->lno &&
		    ent->ignored == next->ignored &&
		    ent->unblamable == next->unblamable) {
			ent->num_lines += next->num_lines;
			ent->next = next->next;
			blame_origin_decref(next->suspect);
			free(next);
			ent->score = 0;
			next = ent; /* again */
		}
	}

	if (sb->debug) /* sanity */
		sanity_check_refcnt(sb);
}

/*
 * Merge the given sorted list of blames into a preexisting origin.
 * If there were no previous blames to that commit, it is entered into
 * the commit priority queue of the score board.
 */
static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin,
			 struct blame_entry *sorted)
{
	if (porigin->suspects)
		porigin->suspects = blame_merge(porigin->suspects, sorted);
	else {
		struct blame_origin *o;
		/* only queue the commit once: skip if a sibling origin
		 * already carries suspects for it */
		for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
			if (o->suspects) {
				porigin->suspects = sorted;
				return;
			}
		}
		porigin->suspects = sorted;
		prio_queue_put(&sb->commits, porigin->commit);
	}
}

/*
 * Fill the blob_sha1 field of an origin if it hasn't, so that later
 * call to fill_origin_blob() can use it to locate the data.  blob_sha1
 * for an origin is also used to pass the blame for the entire file to
 * the parent to detect the case where a child's blob is identical to
 * that of its parent's.
 *
 * This also fills origin->mode for corresponding tree path.
 */
static int fill_blob_sha1_and_mode(struct repository *r,
				   struct blame_origin *origin)
{
	if (!is_null_oid(&origin->blob_oid))
		return 0;
	if (get_tree_entry(r, &origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
		goto error_out;
	if (oid_object_info(r, &origin->blob_oid, NULL) != OBJ_BLOB)
		goto error_out;
	return 0;
 error_out:
	oidclr(&origin->blob_oid, the_repository->hash_algo);
	origin->mode = S_IFINVALID;
	return -1;
}

struct blame_bloom_data {
	/*
	 * Changed-path Bloom filter keys. These can help prevent
	 * computing diffs against first parents, but we need to
	 * expand the list as code is moved or files are renamed.
	 */
	struct bloom_filter_settings *settings;
	struct bloom_key **keys;
	int nr;
	int alloc;
};

/* global counters of Bloom filter queries / definite-no answers */
static int bloom_count_queries = 0;
static int bloom_count_no = 0;

/*
 * Ask the commit's changed-path Bloom filter whether any tracked path
 * may have changed. Returns 1 ("maybe changed") conservatively whenever
 * no filter data is usable; returns 0 only on a definite no.
 */
static int maybe_changed_path(struct repository *r,
			      struct blame_origin *origin,
			      struct blame_bloom_data *bd)
{
	int i;
	struct bloom_filter *filter;

	if (!bd)
		return 1;

	if (commit_graph_generation(origin->commit) == GENERATION_NUMBER_INFINITY)
		return 1;

	filter = get_bloom_filter(r, origin->commit);

	if (!filter)
		return 1;

	bloom_count_queries++;
	for (i = 0; i < bd->nr; i++) {
		if (bloom_filter_contains(filter,
					  bd->keys[i],
					  bd->settings))
			return 1;
	}

	bloom_count_no++;
	return 0;
}

/* Add a Bloom key for "path" to the tracked key list, growing it as needed. */
static void add_bloom_key(struct blame_bloom_data *bd,
			  const char *path)
{
	if (!bd)
		return;

	if (bd->nr >= bd->alloc) {
		bd->alloc *= 2;
		REALLOC_ARRAY(bd->keys, bd->alloc);
	}

	bd->keys[bd->nr] = xmalloc(sizeof(struct bloom_key));

	fill_bloom_key(path, strlen(path), bd->keys[bd->nr], bd->settings);
	bd->nr++;
}

/*
 * We have an origin -- check if the same path exists in the
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_origin(struct repository *r,
					struct commit *parent,
					struct blame_origin *origin,
					struct blame_bloom_data *bd)
{
	struct blame_origin *porigin;
	struct diff_options diff_opts;
	const char *paths[2];

	/* First check any existing origins */
	for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
		if (!strcmp(porigin->path, origin->path)) {
			/*
			 * The same path between origin and its parent
			 * without renaming -- the most common case.
			 */
			return blame_origin_incref (porigin);
		}

	/* See if the origin->path is different between parent
	 * and origin first.  Most of the time they are the
	 * same and diff-tree is fairly efficient about this.
	 */
	repo_diff_setup(r, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = 0;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	paths[0] = origin->path;
	paths[1] = NULL;

	parse_pathspec(&diff_opts.pathspec,
		       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
		       PATHSPEC_LITERAL_PATH, "", paths);
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else {
		int compute_diff = 1;
		/* for a first-parent walk, the Bloom filter may prove the
		 * path unchanged and let us skip the tree diff entirely */
		if (origin->commit->parents &&
		    oideq(&parent->object.oid,
			  &origin->commit->parents->item->object.oid))
			compute_diff = maybe_changed_path(r, origin, bd);

		if (compute_diff)
			diff_tree_oid(get_commit_tree_oid(parent),
				      get_commit_tree_oid(origin->commit),
				      "", &diff_opts);
	}
	diffcore_std(&diff_opts);

	if (!diff_queued_diff.nr) {
		/* The path is the same as parent */
		porigin = get_origin(parent, origin->path);
		oidcpy(&porigin->blob_oid, &origin->blob_oid);
		porigin->mode = origin->mode;
	} else {
		/*
		 * Since origin->path is a pathspec, if the parent
		 * commit had it as a directory, we will see a whole
		 * bunch of deletion of files in the directory that we
		 * do not care about.
		 */
		int i;
		struct diff_filepair *p = NULL;
		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *name;
			p = diff_queued_diff.queue[i];
			name = p->one->path ? p->one->path : p->two->path;
			if (!strcmp(name, origin->path))
				break;
		}
		if (!p)
			die("internal error in blame::find_origin");
		switch (p->status) {
		default:
			die("internal error in blame::find_origin (%c)",
			    p->status);
		case 'M':
			porigin = get_origin(parent, origin->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		case 'A':
		case 'T':
			/* Did not exist in parent, or type changed */
			break;
		}
	}
	diff_flush(&diff_opts);
	return porigin;
}

/*
 * We have an origin -- find the path that corresponds to it in its
 * parent and return an origin structure to represent it.
*/ static struct blame_origin *find_rename(struct repository *r, struct commit *parent, struct blame_origin *origin, struct blame_bloom_data *bd) { struct blame_origin *porigin = NULL; struct diff_options diff_opts; int i; repo_diff_setup(r, &diff_opts); diff_opts.flags.recursive = 1; diff_opts.detect_rename = DIFF_DETECT_RENAME; diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; diff_opts.single_follow = origin->path; diff_setup_done(&diff_opts); if (is_null_oid(&origin->commit->object.oid)) do_diff_cache(get_commit_tree_oid(parent), &diff_opts); else diff_tree_oid(get_commit_tree_oid(parent), get_commit_tree_oid(origin->commit), "", &diff_opts); diffcore_std(&diff_opts); for (i = 0; i < diff_queued_diff.nr; i++) { struct diff_filepair *p = diff_queued_diff.queue[i]; if ((p->status == 'R' || p->status == 'C') && !strcmp(p->two->path, origin->path)) { add_bloom_key(bd, p->one->path); porigin = get_origin(parent, p->one->path); oidcpy(&porigin->blob_oid, &p->one->oid); porigin->mode = p->one->mode; break; } } diff_flush(&diff_opts); return porigin; } /* * Append a new blame entry to a given output queue. */ static void add_blame_entry(struct blame_entry ***queue, const struct blame_entry *src) { struct blame_entry *e = xmalloc(sizeof(*e)); memcpy(e, src, sizeof(*e)); blame_origin_incref(e->suspect); e->next = **queue; **queue = e; *queue = &e->next; } /* * src typically is on-stack; we want to copy the information in it to * a malloced blame_entry that gets added to the given queue. The * origin of dst loses a refcnt. 
*/ static void dup_entry(struct blame_entry ***queue, struct blame_entry *dst, struct blame_entry *src) { blame_origin_incref(src->suspect); blame_origin_decref(dst->suspect); memcpy(dst, src, sizeof(*src)); dst->next = **queue; **queue = dst; *queue = &dst->next; } const char *blame_nth_line(struct blame_scoreboard *sb, long lno) { return sb->final_buf + sb->lineno[lno]; } /* * It is known that lines between tlno to same came from parent, and e * has an overlap with that range. it also is known that parent's * line plno corresponds to e's line tlno. * * <---- e -----> * <------> * <------------> * <------------> * <------------------> * * Split e into potentially three parts; before this chunk, the chunk * to be blamed for the parent, and after that portion. */ static void split_overlap(struct blame_entry *split, struct blame_entry *e, int tlno, int plno, int same, struct blame_origin *parent) { int chunk_end_lno; int i; memset(split, 0, sizeof(struct blame_entry [3])); for (i = 0; i < 3; i++) { split[i].ignored = e->ignored; split[i].unblamable = e->unblamable; } if (e->s_lno < tlno) { /* there is a pre-chunk part not blamed on parent */ split[0].suspect = blame_origin_incref(e->suspect); split[0].lno = e->lno; split[0].s_lno = e->s_lno; split[0].num_lines = tlno - e->s_lno; split[1].lno = e->lno + tlno - e->s_lno; split[1].s_lno = plno; } else { split[1].lno = e->lno; split[1].s_lno = plno + (e->s_lno - tlno); } if (same < e->s_lno + e->num_lines) { /* there is a post-chunk part not blamed on parent */ split[2].suspect = blame_origin_incref(e->suspect); split[2].lno = e->lno + (same - e->s_lno); split[2].s_lno = e->s_lno + (same - e->s_lno); split[2].num_lines = e->s_lno + e->num_lines - same; chunk_end_lno = split[2].lno; } else chunk_end_lno = e->lno + e->num_lines; split[1].num_lines = chunk_end_lno - split[1].lno; /* * if it turns out there is nothing to blame the parent for, * forget about the splitting. !split[1].suspect signals this. 
*/ if (split[1].num_lines < 1) return; split[1].suspect = blame_origin_incref(parent); } /* * split_overlap() divided an existing blame e into up to three parts * in split. Any assigned blame is moved to queue to * reflect the split. */ static void split_blame(struct blame_entry ***blamed, struct blame_entry ***unblamed, struct blame_entry *split, struct blame_entry *e) { if (split[0].suspect && split[2].suspect) { /* The first part (reuse storage for the existing entry e) */ dup_entry(unblamed, e, &split[0]); /* The last part -- me */ add_blame_entry(unblamed, &split[2]); /* ... and the middle part -- parent */ add_blame_entry(blamed, &split[1]); } else if (!split[0].suspect && !split[2].suspect) /* * The parent covers the entire area; reuse storage for * e and replace it with the parent. */ dup_entry(blamed, e, &split[1]); else if (split[0].suspect) { /* me and then parent */ dup_entry(unblamed, e, &split[0]); add_blame_entry(blamed, &split[1]); } else { /* parent and then me */ dup_entry(blamed, e, &split[1]); add_blame_entry(unblamed, &split[2]); } } /* * After splitting the blame, the origins used by the * on-stack blame_entry should lose one refcnt each. */ static void decref_split(struct blame_entry *split) { int i; for (i = 0; i < 3; i++) blame_origin_decref(split[i].suspect); } /* * reverse_blame reverses the list given in head, appending tail. * That allows us to build lists in reverse order, then reverse them * afterwards. This can be faster than building the list in proper * order right away. The reason is that building in proper order * requires writing a link in the _previous_ element, while building * in reverse order just requires placing the list head into the * _current_ element. */ static struct blame_entry *reverse_blame(struct blame_entry *head, struct blame_entry *tail) { while (head) { struct blame_entry *next = head->next; head->next = tail; tail = head; head = next; } return tail; } /* * Splits a blame entry into two entries at 'len' lines. 
The original 'e' * consists of len lines, i.e. [e->lno, e->lno + len), and the second part, * which is returned, consists of the remainder: [e->lno + len, e->lno + * e->num_lines). The caller needs to sort out the reference counting for the * new entry's suspect. */ static struct blame_entry *split_blame_at(struct blame_entry *e, int len, struct blame_origin *new_suspect) { struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry)); n->suspect = new_suspect; n->ignored = e->ignored; n->unblamable = e->unblamable; n->lno = e->lno + len; n->s_lno = e->s_lno + len; n->num_lines = e->num_lines - len; e->num_lines = len; e->score = 0; return n; } struct blame_line_tracker { int is_parent; int s_lno; }; static int are_lines_adjacent(struct blame_line_tracker *first, struct blame_line_tracker *second) { return first->is_parent == second->is_parent && first->s_lno + 1 == second->s_lno; } static int scan_parent_range(struct fingerprint *p_fps, struct fingerprint *t_fps, int t_idx, int from, int nr_lines) { int sim, p_idx; #define FINGERPRINT_FILE_THRESHOLD 10 int best_sim_val = FINGERPRINT_FILE_THRESHOLD; int best_sim_idx = -1; for (p_idx = from; p_idx < from + nr_lines; p_idx++) { sim = fingerprint_similarity(&t_fps[t_idx], &p_fps[p_idx]); if (sim < best_sim_val) continue; /* Break ties with the closest-to-target line number */ if (sim == best_sim_val && best_sim_idx != -1 && abs(best_sim_idx - t_idx) < abs(p_idx - t_idx)) continue; best_sim_val = sim; best_sim_idx = p_idx; } return best_sim_idx; } /* * The first pass checks the blame entry (from the target) against the parent's * diff chunk. If that fails for a line, the second pass tries to match that * line to any part of parent file. That catches cases where a change was * broken into two chunks by 'context.' 
*/ static void guess_line_blames(struct blame_origin *parent, struct blame_origin *target, int tlno, int offset, int same, int parent_len, struct blame_line_tracker *line_blames) { int i, best_idx, target_idx; int parent_slno = tlno + offset; int *fuzzy_matches; fuzzy_matches = fuzzy_find_matching_lines(parent, target, tlno, parent_slno, same, parent_len); for (i = 0; i < same - tlno; i++) { target_idx = tlno + i; if (fuzzy_matches && fuzzy_matches[i] >= 0) { best_idx = fuzzy_matches[i]; } else { best_idx = scan_parent_range(parent->fingerprints, target->fingerprints, target_idx, 0, parent->num_lines); } if (best_idx >= 0) { line_blames[i].is_parent = 1; line_blames[i].s_lno = best_idx; } else { line_blames[i].is_parent = 0; line_blames[i].s_lno = target_idx; } } free(fuzzy_matches); } /* * This decides which parts of a blame entry go to the parent (added to the * ignoredp list) and which stay with the target (added to the diffp list). The * actual decision was made in a separate heuristic function, and those answers * for the lines in 'e' are in line_blames. This consumes e, essentially * putting it on a list. * * Note that the blame entries on the ignoredp list are not necessarily sorted * with respect to the parent's line numbers yet. */ static void ignore_blame_entry(struct blame_entry *e, struct blame_origin *parent, struct blame_entry **diffp, struct blame_entry **ignoredp, struct blame_line_tracker *line_blames) { int entry_len, nr_lines, i; /* * We carve new entries off the front of e. Each entry comes from a * contiguous chunk of lines: adjacent lines from the same origin * (either the parent or the target). */ entry_len = 1; nr_lines = e->num_lines; /* e changes in the loop */ for (i = 0; i < nr_lines; i++) { struct blame_entry *next = NULL; /* * We are often adjacent to the next line - only split the blame * entry when we have to. 
*/ if (i + 1 < nr_lines) { if (are_lines_adjacent(&line_blames[i], &line_blames[i + 1])) { entry_len++; continue; } next = split_blame_at(e, entry_len, blame_origin_incref(e->suspect)); } if (line_blames[i].is_parent) { e->ignored = 1; blame_origin_decref(e->suspect); e->suspect = blame_origin_incref(parent); e->s_lno = line_blames[i - entry_len + 1].s_lno; e->next = *ignoredp; *ignoredp = e; } else { e->unblamable = 1; /* e->s_lno is already in the target's address space. */ e->next = *diffp; *diffp = e; } assert(e->num_lines == entry_len); e = next; entry_len = 1; } assert(!e); } /* * Process one hunk from the patch between the current suspect for * blame_entry e and its parent. This first blames any unfinished * entries before the chunk (which is where target and parent start * differing) on the parent, and then splits blame entries at the * start and at the end of the difference region. Since use of -M and * -C options may lead to overlapping/duplicate source line number * ranges, all we can rely on from sorting/merging is the order of the * first suspect line number. * * tlno: line number in the target where this chunk begins * same: line number in the target where this chunk ends * offset: add to tlno to get the chunk starting point in the parent * parent_len: number of lines in the parent chunk */ static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq, int tlno, int offset, int same, int parent_len, struct blame_origin *parent, struct blame_origin *target, int ignore_diffs) { struct blame_entry *e = **srcq; struct blame_entry *samep = NULL, *diffp = NULL, *ignoredp = NULL; struct blame_line_tracker *line_blames = NULL; while (e && e->s_lno < tlno) { struct blame_entry *next = e->next; /* * current record starts before differing portion. If * it reaches into it, we need to split it up and * examine the second part separately. 
*/ if (e->s_lno + e->num_lines > tlno) { /* Move second half to a new record */ struct blame_entry *n; n = split_blame_at(e, tlno - e->s_lno, e->suspect); /* Push new record to diffp */ n->next = diffp; diffp = n; } else blame_origin_decref(e->suspect); /* Pass blame for everything before the differing * chunk to the parent */ e->suspect = blame_origin_incref(parent); e->s_lno += offset; e->next = samep; samep = e; e = next; } /* * As we don't know how much of a common stretch after this * diff will occur, the currently blamed parts are all that we * can assign to the parent for now. */ if (samep) { **dstq = reverse_blame(samep, **dstq); *dstq = &samep->next; } /* * Prepend the split off portions: everything after e starts * after the blameable portion. */ e = reverse_blame(diffp, e); /* * Now retain records on the target while parts are different * from the parent. */ samep = NULL; diffp = NULL; if (ignore_diffs && same - tlno > 0) { CALLOC_ARRAY(line_blames, same - tlno); guess_line_blames(parent, target, tlno, offset, same, parent_len, line_blames); } while (e && e->s_lno < same) { struct blame_entry *next = e->next; /* * If current record extends into sameness, need to split. */ if (e->s_lno + e->num_lines > same) { /* * Move second half to a new record to be * processed by later chunks */ struct blame_entry *n; n = split_blame_at(e, same - e->s_lno, blame_origin_incref(e->suspect)); /* Push new record to samep */ n->next = samep; samep = n; } if (ignore_diffs) { ignore_blame_entry(e, parent, &diffp, &ignoredp, line_blames + e->s_lno - tlno); } else { e->next = diffp; diffp = e; } e = next; } free(line_blames); if (ignoredp) { /* * Note ignoredp is not sorted yet, and thus neither is dstq. * That list must be sorted before we queue_blames(). We defer * sorting until after all diff hunks are processed, so that * guess_line_blames() can pick *any* line in the parent. 
The * slight drawback is that we end up sorting all blame entries * passed to the parent, including those that are unrelated to * changes made by the ignored commit. */ **dstq = reverse_blame(ignoredp, **dstq); *dstq = &ignoredp->next; } **srcq = reverse_blame(diffp, reverse_blame(samep, e)); /* Move across elements that are in the unblamable portion */ if (diffp) *srcq = &diffp->next; } struct blame_chunk_cb_data { struct blame_origin *parent; struct blame_origin *target; long offset; int ignore_diffs; struct blame_entry **dstq; struct blame_entry **srcq; }; /* diff chunks are from parent to target */ static int blame_chunk_cb(long start_a, long count_a, long start_b, long count_b, void *data) { struct blame_chunk_cb_data *d = data; if (start_a - start_b != d->offset) die("internal error in blame::blame_chunk_cb"); blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b, start_b + count_b, count_a, d->parent, d->target, d->ignore_diffs); d->offset = start_a + count_a - (start_b + count_b); return 0; } /* * We are looking at the origin 'target' and aiming to pass blame * for the lines it is suspected to its parent. Run diff to find * which lines came from parent and pass blame for them. 
*/ static void pass_blame_to_parent(struct blame_scoreboard *sb, struct blame_origin *target, struct blame_origin *parent, int ignore_diffs) { mmfile_t file_p, file_o; struct blame_chunk_cb_data d; struct blame_entry *newdest = NULL; if (!target->suspects) return; /* nothing remains for this target */ d.parent = parent; d.target = target; d.offset = 0; d.ignore_diffs = ignore_diffs; d.dstq = &newdest; d.srcq = &target->suspects; fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob, ignore_diffs); fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob, ignore_diffs); sb->num_get_patch++; if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts)) die("unable to generate diff (%s -> %s)", oid_to_hex(&parent->commit->object.oid), oid_to_hex(&target->commit->object.oid)); /* The rest are the same as the parent */ blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, 0, parent, target, 0); *d.dstq = NULL; if (ignore_diffs) sort_blame_entries(&newdest, compare_blame_suspect); queue_blames(sb, parent, newdest); return; } /* * The lines in blame_entry after splitting blames many times can become * very small and trivial, and at some point it becomes pointless to * blame the parents. E.g. "\t\t}\n\t}\n\n" appears everywhere in any * ordinary C program, and it is not worth to say it was copied from * totally unrelated file in the parent. * * Compute how trivial the lines in the blame_entry are. */ unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e) { unsigned score; const char *cp, *ep; if (e->score) return e->score; score = 1; cp = blame_nth_line(sb, e->lno); ep = blame_nth_line(sb, e->lno + e->num_lines); while (cp < ep) { unsigned ch = *((unsigned char *)cp); if (isalnum(ch)) score++; cp++; } e->score = score; return score; } /* * best_so_far[] and potential[] are both a split of an existing blame_entry * that passes blame to the parent. 
Maintain best_so_far the best split so * far, by comparing potential and best_so_far and copying potential into * bst_so_far as needed. */ static void copy_split_if_better(struct blame_scoreboard *sb, struct blame_entry *best_so_far, struct blame_entry *potential) { int i; if (!potential[1].suspect) return; if (best_so_far[1].suspect) { if (blame_entry_score(sb, &potential[1]) < blame_entry_score(sb, &best_so_far[1])) return; } for (i = 0; i < 3; i++) blame_origin_incref(potential[i].suspect); decref_split(best_so_far); memcpy(best_so_far, potential, sizeof(struct blame_entry[3])); } /* * We are looking at a part of the final image represented by * ent (tlno and same are offset by ent->s_lno). * tlno is where we are looking at in the final image. * up to (but not including) same match preimage. * plno is where we are looking at in the preimage. * * <-------------- final image ----------------------> * <------ent------> * ^tlno ^same * <---------preimage-----> * ^plno * * All line numbers are 0-based. */ static void handle_split(struct blame_scoreboard *sb, struct blame_entry *ent, int tlno, int plno, int same, struct blame_origin *parent, struct blame_entry *split) { if (ent->num_lines <= tlno) return; if (tlno < same) { struct blame_entry potential[3]; tlno += ent->s_lno; same += ent->s_lno; split_overlap(potential, ent, tlno, plno, same, parent); copy_split_if_better(sb, split, potential); decref_split(potential); } } struct handle_split_cb_data { struct blame_scoreboard *sb; struct blame_entry *ent; struct blame_origin *parent; struct blame_entry *split; long plno; long tlno; }; static int handle_split_cb(long start_a, long count_a, long start_b, long count_b, void *data) { struct handle_split_cb_data *d = data; handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent, d->split); d->plno = start_a + count_a; d->tlno = start_b + count_b; return 0; } /* * Find the lines from parent that are the same as ent so that * we can pass blames to it. 
file_p has the blob contents for * the parent. */ static void find_copy_in_blob(struct blame_scoreboard *sb, struct blame_entry *ent, struct blame_origin *parent, struct blame_entry *split, mmfile_t *file_p) { const char *cp; mmfile_t file_o; struct handle_split_cb_data d; memset(&d, 0, sizeof(d)); d.sb = sb; d.ent = ent; d.parent = parent; d.split = split; /* * Prepare mmfile that contains only the lines in ent. */ cp = blame_nth_line(sb, ent->lno); file_o.ptr = (char *) cp; file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp; /* * file_o is a part of final image we are annotating. * file_p partially may match that image. */ memset(split, 0, sizeof(struct blame_entry [3])); if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts)) die("unable to generate diff (%s)", oid_to_hex(&parent->commit->object.oid)); /* remainder, if any, all match the preimage */ handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split); } /* Move all blame entries from list *source that have a score smaller * than score_min to the front of list *small. * Returns a pointer to the link pointing to the old head of the small list. */ static struct blame_entry **filter_small(struct blame_scoreboard *sb, struct blame_entry **small, struct blame_entry **source, unsigned score_min) { struct blame_entry *p = *source; struct blame_entry *oldsmall = *small; while (p) { if (blame_entry_score(sb, p) <= score_min) { *small = p; small = &p->next; p = *small; } else { *source = p; source = &p->next; p = *source; } } *small = oldsmall; *source = NULL; return small; } /* * See if lines currently target is suspected for can be attributed to * parent. 
*/ static void find_move_in_parent(struct blame_scoreboard *sb, struct blame_entry ***blamed, struct blame_entry **toosmall, struct blame_origin *target, struct blame_origin *parent) { struct blame_entry *e, split[3]; struct blame_entry *unblamed = target->suspects; struct blame_entry *leftover = NULL; mmfile_t file_p; if (!unblamed) return; /* nothing remains for this target */ fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob, 0); if (!file_p.ptr) return; /* At each iteration, unblamed has a NULL-terminated list of * entries that have not yet been tested for blame. leftover * contains the reversed list of entries that have been tested * without being assignable to the parent. */ do { struct blame_entry **unblamedtail = &unblamed; struct blame_entry *next; for (e = unblamed; e; e = next) { next = e->next; find_copy_in_blob(sb, e, parent, split, &file_p); if (split[1].suspect && sb->move_score < blame_entry_score(sb, &split[1])) { split_blame(blamed, &unblamedtail, split, e); } else { e->next = leftover; leftover = e; } decref_split(split); } *unblamedtail = NULL; toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score); } while (unblamed); target->suspects = reverse_blame(leftover, NULL); } struct blame_list { struct blame_entry *ent; struct blame_entry split[3]; }; /* * Count the number of entries the target is suspected for, * and prepare a list of entry and the best split. */ static struct blame_list *setup_blame_list(struct blame_entry *unblamed, int *num_ents_p) { struct blame_entry *e; int num_ents, i; struct blame_list *blame_list = NULL; for (e = unblamed, num_ents = 0; e; e = e->next) num_ents++; if (num_ents) { CALLOC_ARRAY(blame_list, num_ents); for (e = unblamed, i = 0; e; e = e->next) blame_list[i++].ent = e; } *num_ents_p = num_ents; return blame_list; } /* * For lines target is suspected for, see if we can find code movement * across file boundary from the parent commit. 
porigin is the path * in the parent we already tried. */ static void find_copy_in_parent(struct blame_scoreboard *sb, struct blame_entry ***blamed, struct blame_entry **toosmall, struct blame_origin *target, struct commit *parent, struct blame_origin *porigin, int opt) { struct diff_options diff_opts; int i, j; struct blame_list *blame_list; int num_ents; struct blame_entry *unblamed = target->suspects; struct blame_entry *leftover = NULL; if (!unblamed) return; /* nothing remains for this target */ repo_diff_setup(sb->repo, &diff_opts); diff_opts.flags.recursive = 1; diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; diff_setup_done(&diff_opts); /* Try "find copies harder" on new path if requested; * we do not want to use diffcore_rename() actually to * match things up; find_copies_harder is set only to * force diff_tree_oid() to feed all filepairs to diff_queue, * and this code needs to be after diff_setup_done(), which * usually makes find-copies-harder imply copy detection. */ if ((opt & PICKAXE_BLAME_COPY_HARDEST) || ((opt & PICKAXE_BLAME_COPY_HARDER) && (!porigin || strcmp(target->path, porigin->path)))) diff_opts.flags.find_copies_harder = 1; if (is_null_oid(&target->commit->object.oid)) do_diff_cache(get_commit_tree_oid(parent), &diff_opts); else diff_tree_oid(get_commit_tree_oid(parent), get_commit_tree_oid(target->commit), "", &diff_opts); if (!diff_opts.flags.find_copies_harder) diffcore_std(&diff_opts); do { struct blame_entry **unblamedtail = &unblamed; blame_list = setup_blame_list(unblamed, &num_ents); for (i = 0; i < diff_queued_diff.nr; i++) { struct diff_filepair *p = diff_queued_diff.queue[i]; struct blame_origin *norigin; mmfile_t file_p; struct blame_entry potential[3]; if (!DIFF_FILE_VALID(p->one)) continue; /* does not exist in parent */ if (S_ISGITLINK(p->one->mode)) continue; /* ignore git links */ if (porigin && !strcmp(p->one->path, porigin->path)) /* find_move already dealt with this path */ continue; norigin = get_origin(parent, 
p->one->path); oidcpy(&norigin->blob_oid, &p->one->oid); norigin->mode = p->one->mode; fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob, 0); if (!file_p.ptr) continue; for (j = 0; j < num_ents; j++) { find_copy_in_blob(sb, blame_list[j].ent, norigin, potential, &file_p); copy_split_if_better(sb, blame_list[j].split, potential); decref_split(potential); } blame_origin_decref(norigin); } for (j = 0; j < num_ents; j++) { struct blame_entry *split = blame_list[j].split; if (split[1].suspect && sb->copy_score < blame_entry_score(sb, &split[1])) { split_blame(blamed, &unblamedtail, split, blame_list[j].ent); } else { blame_list[j].ent->next = leftover; leftover = blame_list[j].ent; } decref_split(split); } free(blame_list); *unblamedtail = NULL; toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score); } while (unblamed); target->suspects = reverse_blame(leftover, NULL); diff_flush(&diff_opts); } /* * The blobs of origin and porigin exactly match, so everything * origin is suspected for can be blamed on the parent. */ static void pass_whole_blame(struct blame_scoreboard *sb, struct blame_origin *origin, struct blame_origin *porigin) { struct blame_entry *e, *suspects; if (!porigin->file.ptr && origin->file.ptr) { /* Steal its file */ porigin->file = origin->file; origin->file.ptr = NULL; } suspects = origin->suspects; origin->suspects = NULL; for (e = suspects; e; e = e->next) { blame_origin_incref(porigin); blame_origin_decref(e->suspect); e->suspect = porigin; } queue_blames(sb, porigin, suspects); } /* * We pass blame from the current commit to its parents. We keep saying * "parent" (and "porigin"), but what we mean is to find scapegoat to * exonerate ourselves. 
*/ static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit, int reverse) { if (!reverse) { if (revs->first_parent_only && commit->parents && commit->parents->next) { free_commit_list(commit->parents->next); commit->parents->next = NULL; } return commit->parents; } return lookup_decoration(&revs->children, &commit->object); } static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse) { struct commit_list *l = first_scapegoat(revs, commit, reverse); return commit_list_count(l); } /* Distribute collected unsorted blames to the respected sorted lists * in the various origins. */ static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed) { sort_blame_entries(&blamed, compare_blame_suspect); while (blamed) { struct blame_origin *porigin = blamed->suspect; struct blame_entry *suspects = NULL; do { struct blame_entry *next = blamed->next; blamed->next = suspects; suspects = blamed; blamed = next; } while (blamed && blamed->suspect == porigin); suspects = reverse_blame(suspects, NULL); queue_blames(sb, porigin, suspects); } } #define MAXSG 16 typedef struct blame_origin *(*blame_find_alg)(struct repository *, struct commit *, struct blame_origin *, struct blame_bloom_data *); static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt) { struct rev_info *revs = sb->revs; int i, pass, num_sg; struct commit *commit = origin->commit; struct commit_list *sg; struct blame_origin *sg_buf[MAXSG]; struct blame_origin *porigin, **sg_origin = sg_buf; struct blame_entry *toosmall = NULL; struct blame_entry *blames, **blametail = &blames; num_sg = num_scapegoats(revs, commit, sb->reverse); if (!num_sg) goto finish; else if (num_sg < ARRAY_SIZE(sg_buf)) memset(sg_buf, 0, sizeof(sg_buf)); else CALLOC_ARRAY(sg_origin, num_sg); /* * The first pass looks for unrenamed path to optimize for * common cases, then we look for renames in the second pass. 
*/ for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) { blame_find_alg find = pass ? find_rename : find_origin; for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); i < num_sg && sg; sg = sg->next, i++) { struct commit *p = sg->item; int j, same; if (sg_origin[i]) continue; if (repo_parse_commit(the_repository, p)) continue; porigin = find(sb->repo, p, origin, sb->bloom_data); if (!porigin) continue; if (oideq(&porigin->blob_oid, &origin->blob_oid)) { pass_whole_blame(sb, origin, porigin); blame_origin_decref(porigin); goto finish; } for (j = same = 0; j < i; j++) if (sg_origin[j] && oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) { same = 1; break; } if (!same) sg_origin[i] = porigin; else blame_origin_decref(porigin); } } sb->num_commits++; for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); i < num_sg && sg; sg = sg->next, i++) { struct blame_origin *porigin = sg_origin[i]; if (!porigin) continue; if (!origin->previous) { blame_origin_incref(porigin); origin->previous = porigin; } pass_blame_to_parent(sb, origin, porigin, 0); if (!origin->suspects) goto finish; } /* * Pass remaining suspects for ignored commits to their parents. */ if (oidset_contains(&sb->ignore_list, &commit->object.oid)) { for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); i < num_sg && sg; sg = sg->next, i++) { struct blame_origin *porigin = sg_origin[i]; if (!porigin) continue; pass_blame_to_parent(sb, origin, porigin, 1); /* * Preemptively drop porigin so we can refresh the * fingerprints if we use the parent again, which can * occur if you ignore back-to-back commits. */ drop_origin_blob(porigin); if (!origin->suspects) goto finish; } } /* * Optionally find moves in parents' files. 
*/ if (opt & PICKAXE_BLAME_MOVE) { filter_small(sb, &toosmall, &origin->suspects, sb->move_score); if (origin->suspects) { for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); i < num_sg && sg; sg = sg->next, i++) { struct blame_origin *porigin = sg_origin[i]; if (!porigin) continue; find_move_in_parent(sb, &blametail, &toosmall, origin, porigin); if (!origin->suspects) break; } } } /* * Optionally find copies from parents' files. */ if (opt & PICKAXE_BLAME_COPY) { if (sb->copy_score > sb->move_score) filter_small(sb, &toosmall, &origin->suspects, sb->copy_score); else if (sb->copy_score < sb->move_score) { origin->suspects = blame_merge(origin->suspects, toosmall); toosmall = NULL; filter_small(sb, &toosmall, &origin->suspects, sb->copy_score); } if (!origin->suspects) goto finish; for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); i < num_sg && sg; sg = sg->next, i++) { struct blame_origin *porigin = sg_origin[i]; find_copy_in_parent(sb, &blametail, &toosmall, origin, sg->item, porigin, opt); if (!origin->suspects) goto finish; } } finish: *blametail = NULL; distribute_blame(sb, blames); /* * prepend toosmall to origin->suspects * * There is no point in sorting: this ends up on a big * unsorted list in the caller anyway. */ if (toosmall) { struct blame_entry **tail = &toosmall; while (*tail) tail = &(*tail)->next; *tail = origin->suspects; origin->suspects = toosmall; } for (i = 0; i < num_sg; i++) { if (sg_origin[i]) { if (!sg_origin[i]->suspects) drop_origin_blob(sg_origin[i]); blame_origin_decref(sg_origin[i]); } } drop_origin_blob(origin); if (sg_buf != sg_origin) free(sg_origin); } /* * The main loop -- while we have blobs with lines whose true origin * is still unknown, pick one blob, and allow its lines to pass blames * to its parents. 
*/ void assign_blame(struct blame_scoreboard *sb, int opt) { struct rev_info *revs = sb->revs; struct commit *commit = prio_queue_get(&sb->commits); while (commit) { struct blame_entry *ent; struct blame_origin *suspect = get_blame_suspects(commit); /* find one suspect to break down */ while (suspect && !suspect->suspects) suspect = suspect->next; if (!suspect) { commit = prio_queue_get(&sb->commits); continue; } assert(commit == suspect->commit); /* * We will use this suspect later in the loop, * so hold onto it in the meantime. */ blame_origin_incref(suspect); repo_parse_commit(the_repository, commit); if (sb->reverse || (!(commit->object.flags & UNINTERESTING) && !(revs->max_age != -1 && commit->date < revs->max_age))) pass_blame(sb, suspect, opt); else { commit->object.flags |= UNINTERESTING; if (commit->object.parsed) mark_parents_uninteresting(sb->revs, commit); } /* treat root commit as boundary */ if (!commit->parents && !sb->show_root) commit->object.flags |= UNINTERESTING; /* Take responsibility for the remaining entries */ ent = suspect->suspects; if (ent) { suspect->guilty = 1; for (;;) { struct blame_entry *next = ent->next; if (sb->found_guilty_entry) sb->found_guilty_entry(ent, sb->found_guilty_entry_data); if (next) { ent = next; continue; } ent->next = sb->ent; sb->ent = suspect->suspects; suspect->suspects = NULL; break; } } blame_origin_decref(suspect); if (sb->debug) /* sanity */ sanity_check_refcnt(sb); } } /* * To allow quick access to the contents of nth line in the * final image, prepare an index in the scoreboard. 
*/ static int prepare_lines(struct blame_scoreboard *sb) { sb->num_lines = find_line_starts(&sb->lineno, sb->final_buf, sb->final_buf_size); return sb->num_lines; } static struct commit *find_single_final(struct rev_info *revs, const char **name_p) { int i; struct commit *found = NULL; const char *name = NULL; for (i = 0; i < revs->pending.nr; i++) { struct object *obj = revs->pending.objects[i].item; if (obj->flags & UNINTERESTING) continue; obj = deref_tag(revs->repo, obj, NULL, 0); if (!obj || obj->type != OBJ_COMMIT) die("Non commit %s?", revs->pending.objects[i].name); if (found) die("More than one commit to dig from %s and %s?", revs->pending.objects[i].name, name); found = (struct commit *)obj; name = revs->pending.objects[i].name; } if (name_p) *name_p = xstrdup_or_null(name); return found; } static struct commit *dwim_reverse_initial(struct rev_info *revs, const char **name_p) { /* * DWIM "git blame --reverse ONE -- PATH" as * "git blame --reverse ONE..HEAD -- PATH" but only do so * when it makes sense. */ struct object *obj; struct commit *head_commit; struct object_id head_oid; if (revs->pending.nr != 1) return NULL; /* Is that sole rev a committish? */ obj = revs->pending.objects[0].item; obj = deref_tag(revs->repo, obj, NULL, 0); if (!obj || obj->type != OBJ_COMMIT) return NULL; /* Do we have HEAD? 
*/ if (!refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", RESOLVE_REF_READING, &head_oid, NULL)) return NULL; head_commit = lookup_commit_reference_gently(revs->repo, &head_oid, 1); if (!head_commit) return NULL; /* Turn "ONE" into "ONE..HEAD" then */ obj->flags |= UNINTERESTING; add_pending_object(revs, &head_commit->object, "HEAD"); if (name_p) *name_p = revs->pending.objects[0].name; return (struct commit *)obj; } static struct commit *find_single_initial(struct rev_info *revs, const char **name_p) { int i; struct commit *found = NULL; const char *name = NULL; /* * There must be one and only one negative commit, and it must be * the boundary. */ for (i = 0; i < revs->pending.nr; i++) { struct object *obj = revs->pending.objects[i].item; if (!(obj->flags & UNINTERESTING)) continue; obj = deref_tag(revs->repo, obj, NULL, 0); if (!obj || obj->type != OBJ_COMMIT) die("Non commit %s?", revs->pending.objects[i].name); if (found) die("More than one commit to dig up from, %s and %s?", revs->pending.objects[i].name, name); found = (struct commit *) obj; name = revs->pending.objects[i].name; } if (!name) found = dwim_reverse_initial(revs, &name); if (!name) die("No commit to dig up from?"); if (name_p) *name_p = xstrdup(name); return found; } void init_scoreboard(struct blame_scoreboard *sb) { memset(sb, 0, sizeof(struct blame_scoreboard)); sb->move_score = BLAME_DEFAULT_MOVE_SCORE; sb->copy_score = BLAME_DEFAULT_COPY_SCORE; } void setup_scoreboard(struct blame_scoreboard *sb, struct blame_origin **orig) { const char *final_commit_name = NULL; struct blame_origin *o; struct commit *final_commit = NULL; enum object_type type; init_blame_suspects(&blame_suspects); if (sb->reverse && sb->contents_from) die(_("--contents and --reverse do not blend well.")); if (!sb->repo) BUG("repo is NULL"); if (!sb->reverse) { sb->final = find_single_final(sb->revs, &final_commit_name); sb->commits.compare = compare_commits_by_commit_date; } else { sb->final = 
find_single_initial(sb->revs, &final_commit_name); sb->commits.compare = compare_commits_by_reverse_commit_date; } if (sb->reverse && sb->revs->first_parent_only) sb->revs->children.name = NULL; if (sb->contents_from || !sb->final) { struct object_id head_oid, *parent_oid; /* * Build a fake commit at the top of the history, when * (1) "git blame [^A] --path", i.e. with no positive end * of the history range, in which case we build such * a fake commit on top of the HEAD to blame in-tree * modifications. * (2) "git blame --contents=file [A] -- path", with or * without positive end of the history range but with * --contents, in which case we pretend that there is * a fake commit on top of the positive end (defaulting to * HEAD) that has the given contents in the path. */ if (sb->final) { parent_oid = &sb->final->object.oid; } else { if (!refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", RESOLVE_REF_READING, &head_oid, NULL)) die("no such ref: HEAD"); parent_oid = &head_oid; } if (!sb->contents_from) setup_work_tree(); sb->final = fake_working_tree_commit(sb->repo, &sb->revs->diffopt, sb->path, sb->contents_from, parent_oid); add_pending_object(sb->revs, &(sb->final->object), ":"); } if (sb->reverse && sb->revs->first_parent_only) { final_commit = find_single_final(sb->revs, NULL); if (!final_commit) die(_("--reverse and --first-parent together require specified latest commit")); } /* * If we have bottom, this will mark the ancestors of the * bottom commits we would reach while traversing as * uninteresting. 
*/ if (prepare_revision_walk(sb->revs)) die(_("revision walk setup failed")); if (sb->reverse && sb->revs->first_parent_only) { struct commit *c = final_commit; sb->revs->children.name = "children"; while (c->parents && !oideq(&c->object.oid, &sb->final->object.oid)) { struct commit_list *l = xcalloc(1, sizeof(*l)); l->item = c; if (add_decoration(&sb->revs->children, &c->parents->item->object, l)) BUG("not unique item in first-parent chain"); c = c->parents->item; } if (!oideq(&c->object.oid, &sb->final->object.oid)) die(_("--reverse --first-parent together require range along first-parent chain")); } if (is_null_oid(&sb->final->object.oid)) { o = get_blame_suspects(sb->final); sb->final_buf = xmemdupz(o->file.ptr, o->file.size); sb->final_buf_size = o->file.size; } else { o = get_origin(sb->final, sb->path); if (fill_blob_sha1_and_mode(sb->repo, o)) die(_("no such path %s in %s"), sb->path, final_commit_name); if (sb->revs->diffopt.flags.allow_textconv && textconv_object(sb->repo, sb->path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf, &sb->final_buf_size)) ; else sb->final_buf = repo_read_object_file(the_repository, &o->blob_oid, &type, &sb->final_buf_size); if (!sb->final_buf) die(_("cannot read blob %s for path %s"), oid_to_hex(&o->blob_oid), sb->path); } sb->num_read_blob++; prepare_lines(sb); if (orig) *orig = o; free((char *)final_commit_name); } struct blame_entry *blame_entry_prepend(struct blame_entry *head, long start, long end, struct blame_origin *o) { struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry)); new_head->lno = start; new_head->num_lines = end - start; new_head->suspect = o; new_head->s_lno = start; new_head->next = head; blame_origin_incref(o); return new_head; } void setup_blame_bloom_data(struct blame_scoreboard *sb) { struct blame_bloom_data *bd; struct bloom_filter_settings *bs; if (!sb->repo->objects->commit_graph) return; bs = get_bloom_filter_settings(sb->repo); if (!bs) return; bd = xmalloc(sizeof(struct 
blame_bloom_data)); bd->settings = bs; bd->alloc = 4; bd->nr = 0; ALLOC_ARRAY(bd->keys, bd->alloc); add_bloom_key(bd, sb->path); sb->bloom_data = bd; } void cleanup_scoreboard(struct blame_scoreboard *sb) { free(sb->lineno); free(sb->final_buf); clear_prio_queue(&sb->commits); oidset_clear(&sb->ignore_list); if (sb->bloom_data) { int i; for (i = 0; i < sb->bloom_data->nr; i++) { free(sb->bloom_data->keys[i]->hashes); free(sb->bloom_data->keys[i]); } free(sb->bloom_data->keys); FREE_AND_NULL(sb->bloom_data); trace2_data_intmax("blame", sb->repo, "bloom/queries", bloom_count_queries); trace2_data_intmax("blame", sb->repo, "bloom/response-no", bloom_count_no); } } git-cinnabar-0.7.0/git-core/blame.h000064400000000000000000000127711046102023000151400ustar 00000000000000#ifndef BLAME_H #define BLAME_H #include "oidset.h" #include "xdiff-interface.h" #include "prio-queue.h" #define PICKAXE_BLAME_MOVE 01 #define PICKAXE_BLAME_COPY 02 #define PICKAXE_BLAME_COPY_HARDER 04 #define PICKAXE_BLAME_COPY_HARDEST 010 #define BLAME_DEFAULT_MOVE_SCORE 20 #define BLAME_DEFAULT_COPY_SCORE 40 struct fingerprint; /* * One blob in a commit that is being suspected */ struct blame_origin { int refcnt; /* Record preceding blame record for this blob */ struct blame_origin *previous; /* origins are put in a list linked via `next' hanging off the * corresponding commit's util field in order to make finding * them fast. The presence in this chain does not count * towards the origin's reference count. It is tempting to * let it count as long as the commit is pending examination, * but even under circumstances where the commit will be * present multiple times in the priority queue of unexamined * commits, processing the first instance will not leave any * work requiring the origin data for the second instance. 
An * interspersed commit changing that would have to be * preexisting with a different ancestry and with the same * commit date in order to wedge itself between two instances * of the same commit in the priority queue _and_ produce * blame entries relevant for it. While we don't want to let * us get tripped up by this case, it certainly does not seem * worth optimizing for. */ struct blame_origin *next; struct commit *commit; /* `suspects' contains blame entries that may be attributed to * this origin's commit or to parent commits. When a commit * is being processed, all suspects will be moved, either by * assigning them to an origin in a different commit, or by * shipping them to the scoreboard's ent list because they * cannot be attributed to a different commit. */ struct blame_entry *suspects; mmfile_t file; int num_lines; struct fingerprint *fingerprints; struct object_id blob_oid; unsigned short mode; /* guilty gets set when shipping any suspects to the final * blame list instead of other commits */ char guilty; char path[FLEX_ARRAY]; }; /* * Each group of lines is described by a blame_entry; it can be split * as we pass blame to the parents. They are arranged in linked lists * kept as `suspects' of some unprocessed origin, or entered (when the * blame origin has been finalized) into the scoreboard structure. * While the scoreboard structure is only sorted at the end of * processing (according to final image line number), the lists * attached to an origin are sorted by the target line number. */ struct blame_entry { struct blame_entry *next; /* the first line of this group in the final image; * internally all line numbers are 0 based. */ int lno; /* how many lines this group has */ int num_lines; /* the commit that introduced this group into the final image */ struct blame_origin *suspect; /* the line number of the first line of this group in the * suspect's file; internally all line numbers are 0 based. 
*/ int s_lno; /* how significant this entry is -- cached to avoid * scanning the lines over and over. */ unsigned score; int ignored; int unblamable; }; struct blame_bloom_data; /* * The current state of the blame assignment. */ struct blame_scoreboard { /* the final commit (i.e. where we started digging from) */ struct commit *final; /* Priority queue for commits with unassigned blame records */ struct prio_queue commits; struct repository *repo; struct rev_info *revs; const char *path; /* * The contents in the final image. * Used by many functions to obtain contents of the nth line, * indexed with scoreboard.lineno[blame_entry.lno]. */ char *final_buf; unsigned long final_buf_size; /* linked list of blames */ struct blame_entry *ent; struct oidset ignore_list; /* look-up a line in the final buffer */ int num_lines; int *lineno; /* stats */ int num_read_blob; int num_get_patch; int num_commits; /* * blame for a blame_entry with score lower than these thresholds * is not passed to the parent using move/copy logic. */ unsigned move_score; unsigned copy_score; /* use this file's contents as the final image */ const char *contents_from; /* flags */ int reverse; int show_root; int xdl_opts; int no_whole_file_rename; int debug; /* callbacks */ void(*on_sanity_fail)(struct blame_scoreboard *, int); void(*found_guilty_entry)(struct blame_entry *, void *); void *found_guilty_entry_data; struct blame_bloom_data *bloom_data; }; /* * Origin is refcounted and usually we keep the blob contents to be * reused. 
*/ static inline struct blame_origin *blame_origin_incref(struct blame_origin *o) { if (o) o->refcnt++; return o; } void blame_origin_decref(struct blame_origin *o); void blame_coalesce(struct blame_scoreboard *sb); void blame_sort_final(struct blame_scoreboard *sb); unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e); void assign_blame(struct blame_scoreboard *sb, int opt); const char *blame_nth_line(struct blame_scoreboard *sb, long lno); void init_scoreboard(struct blame_scoreboard *sb); void setup_scoreboard(struct blame_scoreboard *sb, struct blame_origin **orig); void setup_blame_bloom_data(struct blame_scoreboard *sb); void cleanup_scoreboard(struct blame_scoreboard *sb); struct blame_entry *blame_entry_prepend(struct blame_entry *head, long start, long end, struct blame_origin *o); struct blame_origin *get_blame_suspects(struct commit *commit); #endif /* BLAME_H */ git-cinnabar-0.7.0/git-core/blob.c000064400000000000000000000006231046102023000147620ustar 00000000000000#include "git-compat-util.h" #include "blob.h" #include "alloc.h" const char *blob_type = "blob"; struct blob *lookup_blob(struct repository *r, const struct object_id *oid) { struct object *obj = lookup_object(r, oid); if (!obj) return create_object(r, oid, alloc_blob_node(r)); return object_as_type(obj, OBJ_BLOB, 0); } void parse_blob_buffer(struct blob *item) { item->object.parsed = 1; } git-cinnabar-0.7.0/git-core/blob.h000064400000000000000000000012161046102023000147660ustar 00000000000000#ifndef BLOB_H #define BLOB_H #include "object.h" extern const char *blob_type; struct blob { struct object object; }; struct blob *lookup_blob(struct repository *r, const struct object_id *oid); /** * Blobs do not contain references to other objects and do not have * structured data that needs parsing. 
However, code may use the * "parsed" bit in the struct object for a blob to determine whether * its content has been found to actually be available, so * parse_blob_buffer() is used (by object.c) to flag that the object * has been read successfully from the database. **/ void parse_blob_buffer(struct blob *item); #endif /* BLOB_H */ git-cinnabar-0.7.0/git-core/block-sha1/sha1.c000064400000000000000000000152201046102023000166230ustar 00000000000000/* * SHA1 routine optimized to do word accesses rather than byte accesses, * and to avoid unnecessary copies into the context array. * * This was initially based on the Mozilla SHA1 implementation, although * none of the original Mozilla code remains. */ /* this is only to get definitions for memcpy(), ntohl() and htonl() */ #include "../git-compat-util.h" #include "sha1.h" #define SHA_ROT(X,l,r) (((X) << (l)) | ((X) >> (r))) #define SHA_ROL(X,n) SHA_ROT(X,n,32-(n)) #define SHA_ROR(X,n) SHA_ROT(X,32-(n),n) /* * If you have 32 registers or more, the compiler can (and should) * try to change the array[] accesses into registers. However, on * machines with less than ~25 registers, that won't really work, * and at least gcc will make an unholy mess of it. * * So to avoid that mess which just slows things down, we force * the stores to memory to actually happen (we might be better off * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as * suggested by Artur Skawina - that will also make gcc unable to * try to do the silly "optimize away loads" part because it won't * see what the value will be). * * On ARM we get the best code generation by forcing a full memory barrier * between each SHA_ROUND, otherwise gcc happily get wild with spilling and * the stack frame size simply explode and performance goes down the drain. 
*/ #if defined(__i386__) || defined(__x86_64__) #define setW(x, val) (*(volatile unsigned int *)&W(x) = (val)) #elif defined(__GNUC__) && defined(__arm__) #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0) #else #define setW(x, val) (W(x) = (val)) #endif /* This "rolls" over the 512-bit array */ #define W(x) (array[(x)&15]) /* * Where do we get the source from? The first 16 iterations get it from * the input data, the next mix it from the 512-bit array. */ #define SHA_SRC(t) get_be32((unsigned char *) block + (t)*4) #define SHA_MIX(t) SHA_ROL(W((t)+13) ^ W((t)+8) ^ W((t)+2) ^ W(t), 1) #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ unsigned int TEMP = input(t); setW(t, TEMP); \ E += TEMP + SHA_ROL(A,5) + (fn) + (constant); \ B = SHA_ROR(B, 2); } while (0) #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E ) #define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E ) #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) static void blk_SHA1_Block(blk_SHA_CTX *ctx, const void *block) { unsigned int A,B,C,D,E; unsigned int array[16]; A = ctx->H[0]; B = ctx->H[1]; C = ctx->H[2]; D = ctx->H[3]; E = ctx->H[4]; /* Round 1 - iterations 0-16 take their input from 'block' */ T_0_15( 0, A, B, C, D, E); T_0_15( 1, E, A, B, C, D); T_0_15( 2, D, E, A, B, C); T_0_15( 3, C, D, E, A, B); T_0_15( 4, B, C, D, E, A); T_0_15( 5, A, B, C, D, E); T_0_15( 6, E, A, B, C, D); T_0_15( 7, D, E, A, B, C); T_0_15( 8, C, D, E, A, B); T_0_15( 9, B, C, D, E, A); T_0_15(10, A, B, C, D, E); T_0_15(11, E, A, B, C, D); T_0_15(12, D, E, A, B, C); T_0_15(13, C, D, E, A, B); T_0_15(14, B, C, D, E, A); T_0_15(15, A, B, C, D, E); /* Round 1 - tail. 
Input from 512-bit mixing array */ T_16_19(16, E, A, B, C, D); T_16_19(17, D, E, A, B, C); T_16_19(18, C, D, E, A, B); T_16_19(19, B, C, D, E, A); /* Round 2 */ T_20_39(20, A, B, C, D, E); T_20_39(21, E, A, B, C, D); T_20_39(22, D, E, A, B, C); T_20_39(23, C, D, E, A, B); T_20_39(24, B, C, D, E, A); T_20_39(25, A, B, C, D, E); T_20_39(26, E, A, B, C, D); T_20_39(27, D, E, A, B, C); T_20_39(28, C, D, E, A, B); T_20_39(29, B, C, D, E, A); T_20_39(30, A, B, C, D, E); T_20_39(31, E, A, B, C, D); T_20_39(32, D, E, A, B, C); T_20_39(33, C, D, E, A, B); T_20_39(34, B, C, D, E, A); T_20_39(35, A, B, C, D, E); T_20_39(36, E, A, B, C, D); T_20_39(37, D, E, A, B, C); T_20_39(38, C, D, E, A, B); T_20_39(39, B, C, D, E, A); /* Round 3 */ T_40_59(40, A, B, C, D, E); T_40_59(41, E, A, B, C, D); T_40_59(42, D, E, A, B, C); T_40_59(43, C, D, E, A, B); T_40_59(44, B, C, D, E, A); T_40_59(45, A, B, C, D, E); T_40_59(46, E, A, B, C, D); T_40_59(47, D, E, A, B, C); T_40_59(48, C, D, E, A, B); T_40_59(49, B, C, D, E, A); T_40_59(50, A, B, C, D, E); T_40_59(51, E, A, B, C, D); T_40_59(52, D, E, A, B, C); T_40_59(53, C, D, E, A, B); T_40_59(54, B, C, D, E, A); T_40_59(55, A, B, C, D, E); T_40_59(56, E, A, B, C, D); T_40_59(57, D, E, A, B, C); T_40_59(58, C, D, E, A, B); T_40_59(59, B, C, D, E, A); /* Round 4 */ T_60_79(60, A, B, C, D, E); T_60_79(61, E, A, B, C, D); T_60_79(62, D, E, A, B, C); T_60_79(63, C, D, E, A, B); T_60_79(64, B, C, D, E, A); T_60_79(65, A, B, C, D, E); T_60_79(66, E, A, B, C, D); T_60_79(67, D, E, A, B, C); T_60_79(68, C, D, E, A, B); T_60_79(69, B, C, D, E, A); T_60_79(70, A, B, C, D, E); T_60_79(71, E, A, B, C, D); T_60_79(72, D, E, A, B, C); T_60_79(73, C, D, E, A, B); T_60_79(74, B, C, D, E, A); T_60_79(75, A, B, C, D, E); T_60_79(76, E, A, B, C, D); T_60_79(77, D, E, A, B, C); T_60_79(78, C, D, E, A, B); T_60_79(79, B, C, D, E, A); ctx->H[0] += A; ctx->H[1] += B; ctx->H[2] += C; ctx->H[3] += D; ctx->H[4] += E; } void blk_SHA1_Init(blk_SHA_CTX *ctx) { ctx->size 
= 0; /* Initialize H with the magic constants (see FIPS180 for constants) */ ctx->H[0] = 0x67452301; ctx->H[1] = 0xefcdab89; ctx->H[2] = 0x98badcfe; ctx->H[3] = 0x10325476; ctx->H[4] = 0xc3d2e1f0; } void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, size_t len) { unsigned int lenW = ctx->size & 63; ctx->size += len; /* Read the data into W and process blocks as they get full */ if (lenW) { unsigned int left = 64 - lenW; if (len < left) left = len; memcpy(lenW + (char *)ctx->W, data, left); lenW = (lenW + left) & 63; len -= left; data = ((const char *)data + left); if (lenW) return; blk_SHA1_Block(ctx, ctx->W); } while (len >= 64) { blk_SHA1_Block(ctx, data); data = ((const char *)data + 64); len -= 64; } if (len) memcpy(ctx->W, data, len); } void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx) { static const unsigned char pad[64] = { 0x80 }; unsigned int padlen[2]; int i; /* Pad with a binary 1 (ie 0x80), then zeroes, then length */ padlen[0] = htonl((uint32_t)(ctx->size >> 29)); padlen[1] = htonl((uint32_t)(ctx->size << 3)); i = ctx->size & 63; blk_SHA1_Update(ctx, pad, 1 + (63 & (55 - i))); blk_SHA1_Update(ctx, padlen, 8); /* Output hash */ for (i = 0; i < 5; i++) put_be32(hashout + i * 4, ctx->H[i]); } git-cinnabar-0.7.0/git-core/block-sha1/sha1.h000064400000000000000000000013411046102023000166270ustar 00000000000000/* * SHA1 routine optimized to do word accesses rather than byte accesses, * and to avoid unnecessary copies into the context array. * * This was initially based on the Mozilla SHA1 implementation, although * none of the original Mozilla code remains. 
*/ typedef struct { unsigned long long size; unsigned int H[5]; unsigned int W[16]; } blk_SHA_CTX; void blk_SHA1_Init(blk_SHA_CTX *ctx); void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, size_t len); void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx); #ifndef platform_SHA_CTX #define platform_SHA_CTX blk_SHA_CTX #define platform_SHA1_Init blk_SHA1_Init #define platform_SHA1_Update blk_SHA1_Update #define platform_SHA1_Final blk_SHA1_Final #endif git-cinnabar-0.7.0/git-core/bloom.c000064400000000000000000000323741046102023000151640ustar 00000000000000#define DISABLE_SIGN_COMPARE_WARNINGS #include "git-compat-util.h" #include "bloom.h" #include "diff.h" #include "diffcore.h" #include "hashmap.h" #include "commit-graph.h" #include "commit.h" #include "commit-slab.h" #include "tree.h" #include "tree-walk.h" #include "config.h" #include "repository.h" define_commit_slab(bloom_filter_slab, struct bloom_filter); static struct bloom_filter_slab bloom_filters; struct pathmap_hash_entry { struct hashmap_entry entry; const char path[FLEX_ARRAY]; }; static uint32_t rotate_left(uint32_t value, int32_t count) { uint32_t mask = 8 * sizeof(uint32_t) - 1; count &= mask; return ((value << count) | (value >> ((-count) & mask))); } static inline unsigned char get_bitmask(uint32_t pos) { return ((unsigned char)1) << (pos & (BITS_PER_WORD - 1)); } static int check_bloom_offset(struct commit_graph *g, uint32_t pos, uint32_t offset) { /* * Note that we allow offsets equal to the data size, which would set * our pointers at one past the end of the chunk memory. This is * necessary because the on-disk index points to the end of the * entries (so we can compute size by comparing adjacent ones). And * naturally the final entry's end is one-past-the-end of the chunk. 
*/ if (offset <= g->chunk_bloom_data_size - BLOOMDATA_CHUNK_HEADER_SIZE) return 0; warning("ignoring out-of-range offset (%"PRIuMAX") for changed-path" " filter at pos %"PRIuMAX" of %s (chunk size: %"PRIuMAX")", (uintmax_t)offset, (uintmax_t)pos, g->filename, (uintmax_t)g->chunk_bloom_data_size); return -1; } int load_bloom_filter_from_graph(struct commit_graph *g, struct bloom_filter *filter, uint32_t graph_pos) { uint32_t lex_pos, start_index, end_index; while (graph_pos < g->num_commits_in_base) g = g->base_graph; /* The commit graph commit 'c' lives in doesn't carry Bloom filters. */ if (!g->chunk_bloom_indexes) return 0; lex_pos = graph_pos - g->num_commits_in_base; end_index = get_be32(g->chunk_bloom_indexes + 4 * lex_pos); if (lex_pos > 0) start_index = get_be32(g->chunk_bloom_indexes + 4 * (lex_pos - 1)); else start_index = 0; if (check_bloom_offset(g, lex_pos, end_index) < 0 || check_bloom_offset(g, lex_pos - 1, start_index) < 0) return 0; if (end_index < start_index) { warning("ignoring decreasing changed-path index offsets" " (%"PRIuMAX" > %"PRIuMAX") for positions" " %"PRIuMAX" and %"PRIuMAX" of %s", (uintmax_t)start_index, (uintmax_t)end_index, (uintmax_t)(lex_pos-1), (uintmax_t)lex_pos, g->filename); return 0; } filter->len = end_index - start_index; filter->data = (unsigned char *)(g->chunk_bloom_data + sizeof(unsigned char) * start_index + BLOOMDATA_CHUNK_HEADER_SIZE); filter->version = g->bloom_filter_settings->hash_version; filter->to_free = NULL; return 1; } /* * Calculate the murmur3 32-bit hash value for the given data * using the given seed. * Produces a uniformly distributed hash value. * Not considered to be cryptographically secure. 
* Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm */ uint32_t murmur3_seeded_v2(uint32_t seed, const char *data, size_t len) { const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; const uint32_t r1 = 15; const uint32_t r2 = 13; const uint32_t m = 5; const uint32_t n = 0xe6546b64; int i; uint32_t k1 = 0; const char *tail; int len4 = len / sizeof(uint32_t); uint32_t k; for (i = 0; i < len4; i++) { uint32_t byte1 = (uint32_t)(unsigned char)data[4*i]; uint32_t byte2 = ((uint32_t)(unsigned char)data[4*i + 1]) << 8; uint32_t byte3 = ((uint32_t)(unsigned char)data[4*i + 2]) << 16; uint32_t byte4 = ((uint32_t)(unsigned char)data[4*i + 3]) << 24; k = byte1 | byte2 | byte3 | byte4; k *= c1; k = rotate_left(k, r1); k *= c2; seed ^= k; seed = rotate_left(seed, r2) * m + n; } tail = (data + len4 * sizeof(uint32_t)); switch (len & (sizeof(uint32_t) - 1)) { case 3: k1 ^= ((uint32_t)(unsigned char)tail[2]) << 16; /*-fallthrough*/ case 2: k1 ^= ((uint32_t)(unsigned char)tail[1]) << 8; /*-fallthrough*/ case 1: k1 ^= ((uint32_t)(unsigned char)tail[0]) << 0; k1 *= c1; k1 = rotate_left(k1, r1); k1 *= c2; seed ^= k1; break; } seed ^= (uint32_t)len; seed ^= (seed >> 16); seed *= 0x85ebca6b; seed ^= (seed >> 13); seed *= 0xc2b2ae35; seed ^= (seed >> 16); return seed; } static uint32_t murmur3_seeded_v1(uint32_t seed, const char *data, size_t len) { const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; const uint32_t r1 = 15; const uint32_t r2 = 13; const uint32_t m = 5; const uint32_t n = 0xe6546b64; int i; uint32_t k1 = 0; const char *tail; int len4 = len / sizeof(uint32_t); uint32_t k; for (i = 0; i < len4; i++) { uint32_t byte1 = (uint32_t)data[4*i]; uint32_t byte2 = ((uint32_t)data[4*i + 1]) << 8; uint32_t byte3 = ((uint32_t)data[4*i + 2]) << 16; uint32_t byte4 = ((uint32_t)data[4*i + 3]) << 24; k = byte1 | byte2 | byte3 | byte4; k *= c1; k = rotate_left(k, r1); k *= c2; seed ^= k; seed = rotate_left(seed, r2) * m + n; } tail = (data 
+ len4 * sizeof(uint32_t)); switch (len & (sizeof(uint32_t) - 1)) { case 3: k1 ^= ((uint32_t)tail[2]) << 16; /*-fallthrough*/ case 2: k1 ^= ((uint32_t)tail[1]) << 8; /*-fallthrough*/ case 1: k1 ^= ((uint32_t)tail[0]) << 0; k1 *= c1; k1 = rotate_left(k1, r1); k1 *= c2; seed ^= k1; break; } seed ^= (uint32_t)len; seed ^= (seed >> 16); seed *= 0x85ebca6b; seed ^= (seed >> 13); seed *= 0xc2b2ae35; seed ^= (seed >> 16); return seed; } void fill_bloom_key(const char *data, size_t len, struct bloom_key *key, const struct bloom_filter_settings *settings) { int i; const uint32_t seed0 = 0x293ae76f; const uint32_t seed1 = 0x7e646e2c; uint32_t hash0, hash1; if (settings->hash_version == 2) { hash0 = murmur3_seeded_v2(seed0, data, len); hash1 = murmur3_seeded_v2(seed1, data, len); } else { hash0 = murmur3_seeded_v1(seed0, data, len); hash1 = murmur3_seeded_v1(seed1, data, len); } key->hashes = (uint32_t *)xcalloc(settings->num_hashes, sizeof(uint32_t)); for (i = 0; i < settings->num_hashes; i++) key->hashes[i] = hash0 + i * hash1; } void clear_bloom_key(struct bloom_key *key) { FREE_AND_NULL(key->hashes); } void add_key_to_filter(const struct bloom_key *key, struct bloom_filter *filter, const struct bloom_filter_settings *settings) { int i; uint64_t mod = filter->len * BITS_PER_WORD; for (i = 0; i < settings->num_hashes; i++) { uint64_t hash_mod = key->hashes[i] % mod; uint64_t block_pos = hash_mod / BITS_PER_WORD; filter->data[block_pos] |= get_bitmask(hash_mod); } } void init_bloom_filters(void) { init_bloom_filter_slab(&bloom_filters); } static void free_one_bloom_filter(struct bloom_filter *filter) { if (!filter) return; free(filter->to_free); } void deinit_bloom_filters(void) { deep_clear_bloom_filter_slab(&bloom_filters, free_one_bloom_filter); } static int pathmap_cmp(const void *hashmap_cmp_fn_data UNUSED, const struct hashmap_entry *eptr, const struct hashmap_entry *entry_or_key, const void *keydata UNUSED) { const struct pathmap_hash_entry *e1, *e2; e1 = 
container_of(eptr, const struct pathmap_hash_entry, entry); e2 = container_of(entry_or_key, const struct pathmap_hash_entry, entry); return strcmp(e1->path, e2->path); } static void init_truncated_large_filter(struct bloom_filter *filter, int version) { filter->data = filter->to_free = xmalloc(1); filter->data[0] = 0xFF; filter->len = 1; filter->version = version; } #define VISITED (1u<<21) #define HIGH_BITS (1u<<22) static int has_entries_with_high_bit(struct repository *r, struct tree *t) { if (parse_tree(t)) return 1; if (!(t->object.flags & VISITED)) { struct tree_desc desc; struct name_entry entry; init_tree_desc(&desc, &t->object.oid, t->buffer, t->size); while (tree_entry(&desc, &entry)) { size_t i; for (i = 0; i < entry.pathlen; i++) { if (entry.path[i] & 0x80) { t->object.flags |= HIGH_BITS; goto done; } } if (S_ISDIR(entry.mode)) { struct tree *sub = lookup_tree(r, &entry.oid); if (sub && has_entries_with_high_bit(r, sub)) { t->object.flags |= HIGH_BITS; goto done; } } } done: t->object.flags |= VISITED; } return !!(t->object.flags & HIGH_BITS); } static int commit_tree_has_high_bit_paths(struct repository *r, struct commit *c) { struct tree *t; if (repo_parse_commit(r, c)) return 1; t = repo_get_commit_tree(r, c); if (!t) return 1; return has_entries_with_high_bit(r, t); } static struct bloom_filter *upgrade_filter(struct repository *r, struct commit *c, struct bloom_filter *filter, int hash_version) { struct commit_list *p = c->parents; if (commit_tree_has_high_bit_paths(r, c)) return NULL; if (p && commit_tree_has_high_bit_paths(r, p->item)) return NULL; filter->version = hash_version; return filter; } struct bloom_filter *get_bloom_filter(struct repository *r, struct commit *c) { struct bloom_filter *filter; int hash_version; filter = get_or_compute_bloom_filter(r, c, 0, NULL, NULL); if (!filter) return NULL; prepare_repo_settings(r); hash_version = r->settings.commit_graph_changed_paths_version; if (!(hash_version == -1 || hash_version == 
filter->version)) return NULL; /* unusable filter */ return filter; } struct bloom_filter *get_or_compute_bloom_filter(struct repository *r, struct commit *c, int compute_if_not_present, const struct bloom_filter_settings *settings, enum bloom_filter_computed *computed) { struct bloom_filter *filter; int i; struct diff_options diffopt; if (computed) *computed = BLOOM_NOT_COMPUTED; if (!bloom_filters.slab_size) return NULL; filter = bloom_filter_slab_at(&bloom_filters, c); if (!filter->data) { uint32_t graph_pos; if (repo_find_commit_pos_in_graph(r, c, &graph_pos)) load_bloom_filter_from_graph(r->objects->commit_graph, filter, graph_pos); } if (filter->data && filter->len) { struct bloom_filter *upgrade; if (!settings || settings->hash_version == filter->version) return filter; /* version mismatch, see if we can upgrade */ if (compute_if_not_present && git_env_bool("GIT_TEST_UPGRADE_BLOOM_FILTERS", 1)) { upgrade = upgrade_filter(r, c, filter, settings->hash_version); if (upgrade) { if (computed) *computed |= BLOOM_UPGRADED; return upgrade; } } } if (!compute_if_not_present) return NULL; repo_diff_setup(r, &diffopt); diffopt.flags.recursive = 1; diffopt.detect_rename = 0; diffopt.max_changes = settings->max_changed_paths; diff_setup_done(&diffopt); /* ensure commit is parsed so we have parent information */ repo_parse_commit(r, c); if (c->parents) diff_tree_oid(&c->parents->item->object.oid, &c->object.oid, "", &diffopt); else diff_tree_oid(NULL, &c->object.oid, "", &diffopt); diffcore_std(&diffopt); if (diff_queued_diff.nr <= settings->max_changed_paths) { struct hashmap pathmap = HASHMAP_INIT(pathmap_cmp, NULL); struct pathmap_hash_entry *e; struct hashmap_iter iter; for (i = 0; i < diff_queued_diff.nr; i++) { const char *path = diff_queued_diff.queue[i]->two->path; /* * Add each leading directory of the changed file, i.e. 
for * 'dir/subdir/file' add 'dir' and 'dir/subdir' as well, so * the Bloom filter could be used to speed up commands like * 'git log dir/subdir', too. * * Note that directories are added without the trailing '/'. */ do { char *last_slash = strrchr(path, '/'); FLEX_ALLOC_STR(e, path, path); hashmap_entry_init(&e->entry, strhash(path)); if (!hashmap_get(&pathmap, &e->entry, NULL)) hashmap_add(&pathmap, &e->entry); else free(e); if (!last_slash) last_slash = (char*)path; *last_slash = '\0'; } while (*path); } if (hashmap_get_size(&pathmap) > settings->max_changed_paths) { init_truncated_large_filter(filter, settings->hash_version); if (computed) *computed |= BLOOM_TRUNC_LARGE; goto cleanup; } filter->len = (hashmap_get_size(&pathmap) * settings->bits_per_entry + BITS_PER_WORD - 1) / BITS_PER_WORD; filter->version = settings->hash_version; if (!filter->len) { if (computed) *computed |= BLOOM_TRUNC_EMPTY; filter->len = 1; } CALLOC_ARRAY(filter->data, filter->len); filter->to_free = filter->data; hashmap_for_each_entry(&pathmap, &iter, e, entry) { struct bloom_key key; fill_bloom_key(e->path, strlen(e->path), &key, settings); add_key_to_filter(&key, filter, settings); clear_bloom_key(&key); } cleanup: hashmap_clear_and_free(&pathmap, struct pathmap_hash_entry, entry); } else { init_truncated_large_filter(filter, settings->hash_version); if (computed) *computed |= BLOOM_TRUNC_LARGE; } if (computed) *computed |= BLOOM_COMPUTED; diff_queue_clear(&diff_queued_diff); return filter; } int bloom_filter_contains(const struct bloom_filter *filter, const struct bloom_key *key, const struct bloom_filter_settings *settings) { int i; uint64_t mod = filter->len * BITS_PER_WORD; if (!mod) return -1; for (i = 0; i < settings->num_hashes; i++) { uint64_t hash_mod = key->hashes[i] % mod; uint64_t block_pos = hash_mod / BITS_PER_WORD; if (!(filter->data[block_pos] & get_bitmask(hash_mod))) return 0; } return 1; } 
git-cinnabar-0.7.0/git-core/bloom.h000064400000000000000000000100451046102023000151600ustar 00000000000000#ifndef BLOOM_H #define BLOOM_H struct commit; struct repository; struct commit_graph; struct bloom_filter_settings { /* * The version of the hashing technique being used. * The newest version is 2, which is * the seeded murmur3 hashing technique implemented * in bloom.c. Bloom filters of version 1 were created * with prior versions of Git, which had a bug in the * implementation of the hash function. */ uint32_t hash_version; /* * The number of times a path is hashed, i.e. the * number of bit positions that cumulatively * determine whether a path is present in the * Bloom filter. */ uint32_t num_hashes; /* * The minimum number of bits per entry in the Bloom * filter. If the filter contains 'n' entries, then * filter size is the minimum number of 8-bit words * that contain n*b bits. */ uint32_t bits_per_entry; /* * The maximum number of changed paths per commit * before declaring a Bloom filter to be too-large. * * Not written to the commit-graph file. */ uint32_t max_changed_paths; }; #define DEFAULT_BLOOM_MAX_CHANGES 512 #define DEFAULT_BLOOM_FILTER_SETTINGS { 1, 7, 10, DEFAULT_BLOOM_MAX_CHANGES } #define BITS_PER_WORD 8 #define BLOOMDATA_CHUNK_HEADER_SIZE 3 * sizeof(uint32_t) /* * A bloom_filter struct represents a data segment to * use when testing hash values. The 'len' member * dictates how many entries are stored in * 'data'. */ struct bloom_filter { unsigned char *data; size_t len; int version; void *to_free; }; /* * A bloom_key represents the k hash values for a * given string. These can be precomputed and * stored in a bloom_key for re-use when testing * against a bloom_filter. The number of hashes is * given by the Bloom filter settings and is the same * for all Bloom filters and keys interacting with * the loaded version of the commit graph file and * the Bloom data chunks. 
*/ struct bloom_key { uint32_t *hashes; }; int load_bloom_filter_from_graph(struct commit_graph *g, struct bloom_filter *filter, uint32_t graph_pos); /* * Calculate the murmur3 32-bit hash value for the given data * using the given seed. * Produces a uniformly distributed hash value. * Not considered to be cryptographically secure. * Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm */ uint32_t murmur3_seeded_v2(uint32_t seed, const char *data, size_t len); void fill_bloom_key(const char *data, size_t len, struct bloom_key *key, const struct bloom_filter_settings *settings); void clear_bloom_key(struct bloom_key *key); void add_key_to_filter(const struct bloom_key *key, struct bloom_filter *filter, const struct bloom_filter_settings *settings); void init_bloom_filters(void); void deinit_bloom_filters(void); enum bloom_filter_computed { BLOOM_NOT_COMPUTED = (1 << 0), BLOOM_COMPUTED = (1 << 1), BLOOM_TRUNC_LARGE = (1 << 2), BLOOM_TRUNC_EMPTY = (1 << 3), BLOOM_UPGRADED = (1 << 4), }; struct bloom_filter *get_or_compute_bloom_filter(struct repository *r, struct commit *c, int compute_if_not_present, const struct bloom_filter_settings *settings, enum bloom_filter_computed *computed); /* * Find the Bloom filter associated with the given commit "c". * * If any of the following are true * * - the repository does not have a commit-graph, or * - the repository disables reading from the commit-graph, or * - the given commit does not have a Bloom filter computed, or * - there is a Bloom filter for commit "c", but it cannot be read * because the filter uses an incompatible version of murmur3 * * , then `get_bloom_filter()` will return NULL. Otherwise, the corresponding * Bloom filter will be returned. * * For callers who wish to inspect Bloom filters with incompatible hash * versions, use get_or_compute_bloom_filter(). 
*/ struct bloom_filter *get_bloom_filter(struct repository *r, struct commit *c); int bloom_filter_contains(const struct bloom_filter *filter, const struct bloom_key *key, const struct bloom_filter_settings *settings); #endif git-cinnabar-0.7.0/git-core/branch.c000064400000000000000000000612341046102023000153060ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "git-compat-util.h" #include "advice.h" #include "config.h" #include "branch.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "object-name.h" #include "path.h" #include "refs.h" #include "refspec.h" #include "remote.h" #include "repository.h" #include "sequencer.h" #include "commit.h" #include "worktree.h" #include "submodule-config.h" #include "run-command.h" #include "strmap.h" struct tracking { struct refspec_item spec; struct string_list *srcs; const char *remote; int matches; }; struct find_tracked_branch_cb { struct tracking *tracking; struct string_list ambiguous_remotes; }; static int find_tracked_branch(struct remote *remote, void *priv) { struct find_tracked_branch_cb *ftb = priv; struct tracking *tracking = ftb->tracking; if (!remote_find_tracking(remote, &tracking->spec)) { switch (++tracking->matches) { case 1: string_list_append_nodup(tracking->srcs, tracking->spec.src); tracking->remote = remote->name; break; case 2: /* there are at least two remotes; backfill the first one */ string_list_append(&ftb->ambiguous_remotes, tracking->remote); /* fall through */ default: string_list_append(&ftb->ambiguous_remotes, remote->name); free(tracking->spec.src); string_list_clear(tracking->srcs, 0); break; } /* remote_find_tracking() searches by src if present */ tracking->spec.src = NULL; } return 0; } static int should_setup_rebase(const char *origin) { switch (autorebase) { case AUTOREBASE_NEVER: return 0; case AUTOREBASE_LOCAL: return origin == NULL; case AUTOREBASE_REMOTE: return origin != NULL; case AUTOREBASE_ALWAYS: return 1; } return 0; } /** * Install 
upstream tracking configuration for a branch; specifically, add * `branch..remote` and `branch..merge` entries. * * `flag` contains integer flags for options; currently only * BRANCH_CONFIG_VERBOSE is checked. * * `local` is the name of the branch whose configuration we're installing. * * `origin` is the name of the remote owning the upstream branches. NULL means * the upstream branches are local to this repo. * * `remotes` is a list of refs that are upstream of local */ static int install_branch_config_multiple_remotes(int flag, const char *local, const char *origin, struct string_list *remotes) { const char *shortname = NULL; struct strbuf key = STRBUF_INIT; struct string_list_item *item; int rebasing = should_setup_rebase(origin); if (!remotes->nr) BUG("must provide at least one remote for branch config"); if (rebasing && remotes->nr > 1) die(_("cannot inherit upstream tracking configuration of " "multiple refs when rebasing is requested")); /* * If the new branch is trying to track itself, something has gone * wrong. Warn the user and don't proceed any further. */ if (!origin) for_each_string_list_item(item, remotes) if (skip_prefix(item->string, "refs/heads/", &shortname) && !strcmp(local, shortname)) { warning(_("not setting branch '%s' as its own upstream"), local); return 0; } strbuf_addf(&key, "branch.%s.remote", local); if (git_config_set_gently(key.buf, origin ? origin : ".") < 0) goto out_err; strbuf_reset(&key); strbuf_addf(&key, "branch.%s.merge", local); /* * We want to overwrite any existing config with all the branches in * "remotes". Override any existing config, then write our branches. If * more than one is provided, use CONFIG_REGEX_NONE to preserve what * we've written so far. 
*/ if (git_config_set_gently(key.buf, NULL) < 0) goto out_err; for_each_string_list_item(item, remotes) if (git_config_set_multivar_gently(key.buf, item->string, CONFIG_REGEX_NONE, 0) < 0) goto out_err; if (rebasing) { strbuf_reset(&key); strbuf_addf(&key, "branch.%s.rebase", local); if (git_config_set_gently(key.buf, "true") < 0) goto out_err; } strbuf_release(&key); if (flag & BRANCH_CONFIG_VERBOSE) { struct strbuf tmp_ref_name = STRBUF_INIT; struct string_list friendly_ref_names = STRING_LIST_INIT_DUP; for_each_string_list_item(item, remotes) { shortname = item->string; skip_prefix(shortname, "refs/heads/", &shortname); if (origin) { strbuf_addf(&tmp_ref_name, "%s/%s", origin, shortname); string_list_append_nodup( &friendly_ref_names, strbuf_detach(&tmp_ref_name, NULL)); } else { string_list_append( &friendly_ref_names, shortname); } } if (remotes->nr == 1) { /* * Rebasing is only allowed in the case of a single * upstream branch. */ printf_ln(rebasing ? _("branch '%s' set up to track '%s' by rebasing.") : _("branch '%s' set up to track '%s'."), local, friendly_ref_names.items[0].string); } else { printf_ln(_("branch '%s' set up to track:"), local); for_each_string_list_item(item, &friendly_ref_names) printf_ln(" %s", item->string); } string_list_clear(&friendly_ref_names, 0); } return 0; out_err: strbuf_release(&key); error(_("unable to write upstream branch configuration")); advise(_("\nAfter fixing the error cause you may try to fix up\n" "the remote tracking information by invoking:")); if (remotes->nr == 1) advise(" git branch --set-upstream-to=%s%s%s", origin ? origin : "", origin ? "/" : "", remotes->items[0].string); else { advise(" git config --add branch.\"%s\".remote %s", local, origin ? 
origin : "."); for_each_string_list_item(item, remotes) advise(" git config --add branch.\"%s\".merge %s", local, item->string); } return -1; } int install_branch_config(int flag, const char *local, const char *origin, const char *remote) { int ret; struct string_list remotes = STRING_LIST_INIT_DUP; string_list_append(&remotes, remote); ret = install_branch_config_multiple_remotes(flag, local, origin, &remotes); string_list_clear(&remotes, 0); return ret; } static int inherit_tracking(struct tracking *tracking, const char *orig_ref) { const char *bare_ref; struct branch *branch; int i; bare_ref = orig_ref; skip_prefix(orig_ref, "refs/heads/", &bare_ref); branch = branch_get(bare_ref); if (!branch->remote_name) { warning(_("asked to inherit tracking from '%s', but no remote is set"), bare_ref); return -1; } if (branch->merge_nr < 1 || !branch->merge_name || !branch->merge_name[0]) { warning(_("asked to inherit tracking from '%s', but no merge configuration is set"), bare_ref); return -1; } tracking->remote = branch->remote_name; for (i = 0; i < branch->merge_nr; i++) string_list_append(tracking->srcs, branch->merge_name[i]); return 0; } /* * Used internally to set the branch..{remote,merge} config * settings so that branch 'new_ref' tracks 'orig_ref'. Unlike * dwim_and_setup_tracking(), this does not do DWIM, i.e. "origin/main" * will not be expanded to "refs/remotes/origin/main", so it is not safe * for 'orig_ref' to be raw user input. */ static void setup_tracking(const char *new_ref, const char *orig_ref, enum branch_track track, int quiet) { struct tracking tracking; struct string_list tracking_srcs = STRING_LIST_INIT_DUP; int config_flags = quiet ? 
0 : BRANCH_CONFIG_VERBOSE; struct find_tracked_branch_cb ftb_cb = { .tracking = &tracking, .ambiguous_remotes = STRING_LIST_INIT_DUP, }; if (!track) BUG("asked to set up tracking, but tracking is disallowed"); memset(&tracking, 0, sizeof(tracking)); tracking.spec.dst = (char *)orig_ref; tracking.srcs = &tracking_srcs; if (track != BRANCH_TRACK_INHERIT) for_each_remote(find_tracked_branch, &ftb_cb); else if (inherit_tracking(&tracking, orig_ref)) goto cleanup; if (!tracking.matches) switch (track) { /* If ref is not remote, still use local */ case BRANCH_TRACK_ALWAYS: case BRANCH_TRACK_EXPLICIT: case BRANCH_TRACK_OVERRIDE: /* Remote matches not evaluated */ case BRANCH_TRACK_INHERIT: break; /* Otherwise, if no remote don't track */ default: goto cleanup; } /* * This check does not apply to BRANCH_TRACK_INHERIT; * that supports multiple entries in tracking_srcs but * leaves tracking.matches at 0. */ if (tracking.matches > 1) { int status = die_message(_("not tracking: ambiguous information for ref '%s'"), orig_ref); if (advice_enabled(ADVICE_AMBIGUOUS_FETCH_REFSPEC)) { struct strbuf remotes_advice = STRBUF_INIT; struct string_list_item *item; for_each_string_list_item(item, &ftb_cb.ambiguous_remotes) /* * TRANSLATORS: This is a line listing a remote with duplicate * refspecs in the advice message below. For RTL languages you'll * probably want to swap the "%s" and leading " " space around. */ strbuf_addf(&remotes_advice, _(" %s\n"), item->string); /* * TRANSLATORS: The second argument is a \n-delimited list of * duplicate refspecs, composed above. 
*/ advise(_("There are multiple remotes whose fetch refspecs map to the remote\n" "tracking ref '%s':\n" "%s" "\n" "This is typically a configuration error.\n" "\n" "To support setting up tracking branches, ensure that\n" "different remotes' fetch refspecs map into different\n" "tracking namespaces."), orig_ref, remotes_advice.buf); strbuf_release(&remotes_advice); } exit(status); } if (track == BRANCH_TRACK_SIMPLE) { /* * Only track if remote branch name matches. * Reaching into items[0].string is safe because * we know there is at least one and not more than * one entry (because only BRANCH_TRACK_INHERIT can * produce more than one entry). */ const char *tracked_branch; if (!skip_prefix(tracking.srcs->items[0].string, "refs/heads/", &tracked_branch) || strcmp(tracked_branch, new_ref)) goto cleanup; } if (tracking.srcs->nr < 1) string_list_append(tracking.srcs, orig_ref); if (install_branch_config_multiple_remotes(config_flags, new_ref, tracking.remote, tracking.srcs) < 0) exit(1); cleanup: string_list_clear(&tracking_srcs, 0); string_list_clear(&ftb_cb.ambiguous_remotes, 0); } int read_branch_desc(struct strbuf *buf, const char *branch_name) { char *v = NULL; struct strbuf name = STRBUF_INIT; strbuf_addf(&name, "branch.%s.description", branch_name); if (git_config_get_string(name.buf, &v)) { strbuf_release(&name); return -1; } strbuf_addstr(buf, v); free(v); strbuf_release(&name); return 0; } /* * Check if 'name' can be a valid name for a branch; die otherwise. * Return 1 if the named branch already exists; return 0 otherwise. * Fill ref with the full refname for the branch. 
*/ int validate_branchname(const char *name, struct strbuf *ref) { if (check_branch_ref(ref, name)) { int code = die_message(_("'%s' is not a valid branch name"), name); advise_if_enabled(ADVICE_REF_SYNTAX, _("See `man git check-ref-format`")); exit(code); } return refs_ref_exists(get_main_ref_store(the_repository), ref->buf); } static int initialized_checked_out_branches; static struct strmap current_checked_out_branches = STRMAP_INIT; static void prepare_checked_out_branches(void) { int i = 0; struct worktree **worktrees; if (initialized_checked_out_branches) return; initialized_checked_out_branches = 1; worktrees = get_worktrees(); while (worktrees[i]) { char *old; struct wt_status_state state = { 0 }; struct worktree *wt = worktrees[i++]; struct string_list update_refs = STRING_LIST_INIT_DUP; if (wt->is_bare) continue; if (wt->head_ref) { old = strmap_put(¤t_checked_out_branches, wt->head_ref, xstrdup(wt->path)); free(old); } if (wt_status_check_rebase(wt, &state) && (state.rebase_in_progress || state.rebase_interactive_in_progress) && state.branch) { struct strbuf ref = STRBUF_INIT; strbuf_addf(&ref, "refs/heads/%s", state.branch); old = strmap_put(¤t_checked_out_branches, ref.buf, xstrdup(wt->path)); free(old); strbuf_release(&ref); } wt_status_state_free_buffers(&state); if (wt_status_check_bisect(wt, &state) && state.bisecting_from) { struct strbuf ref = STRBUF_INIT; strbuf_addf(&ref, "refs/heads/%s", state.bisecting_from); old = strmap_put(¤t_checked_out_branches, ref.buf, xstrdup(wt->path)); free(old); strbuf_release(&ref); } wt_status_state_free_buffers(&state); if (!sequencer_get_update_refs_state(get_worktree_git_dir(wt), &update_refs)) { struct string_list_item *item; for_each_string_list_item(item, &update_refs) { old = strmap_put(¤t_checked_out_branches, item->string, xstrdup(wt->path)); free(old); } string_list_clear(&update_refs, 1); } } free_worktrees(worktrees); } const char *branch_checked_out(const char *refname) { 
prepare_checked_out_branches(); return strmap_get(¤t_checked_out_branches, refname); } /* * Check if a branch 'name' can be created as a new branch; die otherwise. * 'force' can be used when it is OK for the named branch already exists. * Return 1 if the named branch already exists; return 0 otherwise. * Fill ref with the full refname for the branch. */ int validate_new_branchname(const char *name, struct strbuf *ref, int force) { const char *path; if (!validate_branchname(name, ref)) return 0; if (!force) die(_("a branch named '%s' already exists"), ref->buf + strlen("refs/heads/")); if ((path = branch_checked_out(ref->buf))) die(_("cannot force update the branch '%s' " "used by worktree at '%s'"), ref->buf + strlen("refs/heads/"), path); return 1; } static int check_tracking_branch(struct remote *remote, void *cb_data) { char *tracking_branch = cb_data; struct refspec_item query; int res; memset(&query, 0, sizeof(struct refspec_item)); query.dst = tracking_branch; res = !remote_find_tracking(remote, &query); free(query.src); return res; } static int validate_remote_tracking_branch(char *ref) { return !for_each_remote(check_tracking_branch, ref); } static const char upstream_not_branch[] = N_("cannot set up tracking information; starting point '%s' is not a branch"); static const char upstream_missing[] = N_("the requested upstream branch '%s' does not exist"); static const char upstream_advice[] = N_("\n" "If you are planning on basing your work on an upstream\n" "branch that already exists at the remote, you may need to\n" "run \"git fetch\" to retrieve it.\n" "\n" "If you are planning to push out a new local branch that\n" "will track its remote counterpart, you may want to use\n" "\"git push -u\" to set the upstream config as you push."); /** * DWIMs a user-provided ref to determine the starting point for a * branch and validates it, where: * * - r is the repository to validate the branch for * * - start_name is the ref that we would like to test. 
This is * expanded with DWIM and assigned to out_real_ref. * * - track is the tracking mode of the new branch. If tracking is * explicitly requested, start_name must be a branch (because * otherwise start_name cannot be tracked) * * - out_oid is an out parameter containing the object_id of start_name * * - out_real_ref is an out parameter containing the full, 'real' form * of start_name e.g. refs/heads/main instead of main * */ static void dwim_branch_start(struct repository *r, const char *start_name, enum branch_track track, char **out_real_ref, struct object_id *out_oid) { struct commit *commit; struct object_id oid; char *real_ref; int explicit_tracking = 0; if (track == BRANCH_TRACK_EXPLICIT || track == BRANCH_TRACK_OVERRIDE) explicit_tracking = 1; real_ref = NULL; if (repo_get_oid_mb(r, start_name, &oid)) { if (explicit_tracking) { int code = die_message(_(upstream_missing), start_name); advise_if_enabled(ADVICE_SET_UPSTREAM_FAILURE, _(upstream_advice)); exit(code); } die(_("not a valid object name: '%s'"), start_name); } switch (repo_dwim_ref(r, start_name, strlen(start_name), &oid, &real_ref, 0)) { case 0: /* Not branching from any existing branch */ if (explicit_tracking) die(_(upstream_not_branch), start_name); break; case 1: /* Unique completion -- good, only if it is a real branch */ if (!starts_with(real_ref, "refs/heads/") && validate_remote_tracking_branch(real_ref)) { if (explicit_tracking) die(_(upstream_not_branch), start_name); else FREE_AND_NULL(real_ref); } break; default: die(_("ambiguous object name: '%s'"), start_name); break; } if (!(commit = lookup_commit_reference(r, &oid))) die(_("not a valid branch point: '%s'"), start_name); if (out_real_ref) { *out_real_ref = real_ref; real_ref = NULL; } if (out_oid) oidcpy(out_oid, &commit->object.oid); FREE_AND_NULL(real_ref); } void create_branch(struct repository *r, const char *name, const char *start_name, int force, int clobber_head_ok, int reflog, int quiet, enum branch_track track, int 
dry_run) { struct object_id oid; char *real_ref; struct strbuf ref = STRBUF_INIT; int forcing = 0; struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; int flags = 0; char *msg; if (track == BRANCH_TRACK_OVERRIDE) BUG("'track' cannot be BRANCH_TRACK_OVERRIDE. Did you mean to call dwim_and_setup_tracking()?"); if (clobber_head_ok && !force) BUG("'clobber_head_ok' can only be used with 'force'"); if (clobber_head_ok ? validate_branchname(name, &ref) : validate_new_branchname(name, &ref, force)) { forcing = 1; } dwim_branch_start(r, start_name, track, &real_ref, &oid); if (dry_run) goto cleanup; if (reflog) flags |= REF_FORCE_CREATE_REFLOG; if (forcing) msg = xstrfmt("branch: Reset to %s", start_name); else msg = xstrfmt("branch: Created from %s", start_name); transaction = ref_store_transaction_begin(get_main_ref_store(the_repository), 0, &err); if (!transaction || ref_transaction_update(transaction, ref.buf, &oid, forcing ? NULL : null_oid(), NULL, NULL, flags, msg, &err) || ref_transaction_commit(transaction, &err)) die("%s", err.buf); ref_transaction_free(transaction); strbuf_release(&err); free(msg); if (real_ref && track) setup_tracking(ref.buf + 11, real_ref, track, quiet); cleanup: strbuf_release(&ref); free(real_ref); } void dwim_and_setup_tracking(struct repository *r, const char *new_ref, const char *orig_ref, enum branch_track track, int quiet) { char *real_orig_ref = NULL; dwim_branch_start(r, orig_ref, track, &real_orig_ref, NULL); setup_tracking(new_ref, real_orig_ref, track, quiet); free(real_orig_ref); } /** * Creates a branch in a submodule by calling * create_branches_recursively() in a child process. The child process * is necessary because install_branch_config_multiple_remotes() (which * is called by setup_tracking()) does not support writing configs to * submodules. 
*/ static int submodule_create_branch(struct repository *r, const struct submodule *submodule, const char *name, const char *start_oid, const char *tracking_name, int force, int reflog, int quiet, enum branch_track track, int dry_run) { int ret = 0; struct child_process child = CHILD_PROCESS_INIT; struct strbuf child_err = STRBUF_INIT; struct strbuf out_buf = STRBUF_INIT; char *out_prefix = xstrfmt("submodule '%s': ", submodule->name); child.git_cmd = 1; child.err = -1; child.stdout_to_stderr = 1; prepare_other_repo_env(&child.env, r->gitdir); /* * submodule_create_branch() is indirectly invoked by "git * branch", but we cannot invoke "git branch" in the child * process. "git branch" accepts a branch name and start point, * where the start point is assumed to provide both the OID * (start_oid) and the branch to use for tracking * (tracking_name). But when recursing through submodules, * start_oid and tracking name need to be specified separately * (see create_branches_recursively()). */ strvec_pushl(&child.args, "submodule--helper", "create-branch", NULL); if (dry_run) strvec_push(&child.args, "--dry-run"); if (force) strvec_push(&child.args, "--force"); if (quiet) strvec_push(&child.args, "--quiet"); if (reflog) strvec_push(&child.args, "--create-reflog"); switch (track) { case BRANCH_TRACK_NEVER: strvec_push(&child.args, "--no-track"); break; case BRANCH_TRACK_ALWAYS: case BRANCH_TRACK_EXPLICIT: strvec_push(&child.args, "--track=direct"); break; case BRANCH_TRACK_OVERRIDE: BUG("BRANCH_TRACK_OVERRIDE cannot be used when creating a branch."); break; case BRANCH_TRACK_INHERIT: strvec_push(&child.args, "--track=inherit"); break; case BRANCH_TRACK_UNSPECIFIED: /* Default for "git checkout". Do not pass --track. */ case BRANCH_TRACK_REMOTE: /* Default for "git branch". Do not pass --track. */ case BRANCH_TRACK_SIMPLE: /* Config-driven only. Do not pass --track. 
*/ break; } strvec_pushl(&child.args, name, start_oid, tracking_name, NULL); if ((ret = start_command(&child))) return ret; ret = finish_command(&child); strbuf_read(&child_err, child.err, 0); strbuf_add_lines(&out_buf, out_prefix, child_err.buf, child_err.len); if (ret) fprintf(stderr, "%s", out_buf.buf); else printf("%s", out_buf.buf); strbuf_release(&child_err); strbuf_release(&out_buf); free(out_prefix); return ret; } void create_branches_recursively(struct repository *r, const char *name, const char *start_committish, const char *tracking_name, int force, int reflog, int quiet, enum branch_track track, int dry_run) { int i = 0; char *branch_point = NULL; struct object_id super_oid; struct submodule_entry_list submodule_entry_list; /* Perform dwim on start_committish to get super_oid and branch_point. */ dwim_branch_start(r, start_committish, BRANCH_TRACK_NEVER, &branch_point, &super_oid); /* * If we were not given an explicit name to track, then assume we are at * the top level and, just like the non-recursive case, the tracking * name is the branch point. */ if (!tracking_name) tracking_name = branch_point; submodules_of_tree(r, &super_oid, &submodule_entry_list); /* * Before creating any branches, first check that the branch can * be created in every submodule. 
*/ for (i = 0; i < submodule_entry_list.entry_nr; i++) { if (!submodule_entry_list.entries[i].repo) { int code = die_message( _("submodule '%s': unable to find submodule"), submodule_entry_list.entries[i].submodule->name); if (advice_enabled(ADVICE_SUBMODULES_NOT_UPDATED)) advise(_("You may try updating the submodules using 'git checkout --no-recurse-submodules %s && git submodule update --init'"), start_committish); exit(code); } if (submodule_create_branch( submodule_entry_list.entries[i].repo, submodule_entry_list.entries[i].submodule, name, oid_to_hex(&submodule_entry_list.entries[i] .name_entry->oid), tracking_name, force, reflog, quiet, track, 1)) die(_("submodule '%s': cannot create branch '%s'"), submodule_entry_list.entries[i].submodule->name, name); } create_branch(r, name, start_committish, force, 0, reflog, quiet, BRANCH_TRACK_NEVER, dry_run); if (dry_run) goto out; /* * NEEDSWORK If tracking was set up in the superproject but not the * submodule, users might expect "git branch --recurse-submodules" to * fail or give a warning, but this is not yet implemented because it is * tedious to determine whether or not tracking was set up in the * superproject. 
*/ if (track) setup_tracking(name, tracking_name, track, quiet); for (i = 0; i < submodule_entry_list.entry_nr; i++) { if (submodule_create_branch( submodule_entry_list.entries[i].repo, submodule_entry_list.entries[i].submodule, name, oid_to_hex(&submodule_entry_list.entries[i] .name_entry->oid), tracking_name, force, reflog, quiet, track, 0)) die(_("submodule '%s': cannot create branch '%s'"), submodule_entry_list.entries[i].submodule->name, name); } out: submodule_entry_list_release(&submodule_entry_list); free(branch_point); } void remove_merge_branch_state(struct repository *r) { unlink(git_path_merge_head(r)); unlink(git_path_merge_rr(r)); unlink(git_path_merge_msg(r)); unlink(git_path_merge_mode(r)); refs_delete_ref(get_main_ref_store(r), "", "AUTO_MERGE", NULL, REF_NO_DEREF); save_autostash_ref(r, "MERGE_AUTOSTASH"); } void remove_branch_state(struct repository *r, int verbose) { sequencer_post_commit_cleanup(r, verbose); unlink(git_path_squash_msg(r)); remove_merge_branch_state(r); } void die_if_checked_out(const char *branch, int ignore_current_worktree) { struct worktree **worktrees = get_worktrees(); for (int i = 0; worktrees[i]; i++) { if (worktrees[i]->is_current && ignore_current_worktree) continue; if (is_shared_symref(worktrees[i], "HEAD", branch)) { skip_prefix(branch, "refs/heads/", &branch); die(_("'%s' is already used by worktree at '%s'"), branch, worktrees[i]->path); } } free_worktrees(worktrees); } git-cinnabar-0.7.0/git-core/branch.h000064400000000000000000000123351046102023000153110ustar 00000000000000#ifndef BRANCH_H #define BRANCH_H struct repository; struct strbuf; enum branch_track { BRANCH_TRACK_UNSPECIFIED = -1, BRANCH_TRACK_NEVER = 0, BRANCH_TRACK_REMOTE, BRANCH_TRACK_ALWAYS, BRANCH_TRACK_EXPLICIT, BRANCH_TRACK_OVERRIDE, BRANCH_TRACK_INHERIT, BRANCH_TRACK_SIMPLE, }; extern enum branch_track git_branch_track; /* Functions for acting on the information about branches. 
*/ /** * Sets branch..{remote,merge} config settings such that * new_ref tracks orig_ref according to the specified tracking mode. * * - new_ref is the name of the branch that we are setting tracking * for. * * - orig_ref is the name of the ref that is 'upstream' of new_ref. * orig_ref will be expanded with DWIM so that the config settings * are in the correct format e.g. "refs/remotes/origin/main" instead * of "origin/main". * * - track is the tracking mode e.g. BRANCH_TRACK_REMOTE causes * new_ref to track orig_ref directly, whereas BRANCH_TRACK_INHERIT * causes new_ref to track whatever orig_ref tracks. * * - quiet suppresses tracking information. */ void dwim_and_setup_tracking(struct repository *r, const char *new_ref, const char *orig_ref, enum branch_track track, int quiet); /* * Creates a new branch, where: * * - r is the repository to add a branch to * * - name is the new branch name * * - start_name is the name of the existing branch that the new branch should * start from * * - force enables overwriting an existing (non-head) branch * * - clobber_head_ok, when enabled with 'force', allows the currently * checked out (head) branch to be overwritten * * - reflog creates a reflog for the branch * * - quiet suppresses tracking information * * - track causes the new branch to be configured to merge the remote branch * that start_name is a tracking branch for (if any). * * - dry_run causes the branch to be validated but not created. * */ void create_branch(struct repository *r, const char *name, const char *start_name, int force, int clobber_head_ok, int reflog, int quiet, enum branch_track track, int dry_run); /* * Creates a new branch in a repository and its submodules (and its * submodules, recursively). 
The parameters are mostly analogous to * those of create_branch() except for start_name, which is represented * by two different parameters: * * - start_committish is the commit-ish, in repository r, that determines * which commits the branches will point to. The superproject branch * will point to the commit of start_committish and the submodule * branches will point to the gitlink commit oids in start_committish's * tree. * * - tracking_name is the name of the ref, in repository r, that will be * used to set up tracking information. This value is propagated to * all submodules, which will evaluate the ref using their own ref * stores. If NULL, this defaults to start_committish. * * When this function is called on the superproject, start_committish * can be any user-provided ref and tracking_name can be NULL (similar * to create_branches()). But when recursing through submodules, * start_committish is the plain gitlink commit oid. Since the oid cannot * be used for tracking information, tracking_name is propagated and * used for tracking instead. */ void create_branches_recursively(struct repository *r, const char *name, const char *start_committish, const char *tracking_name, int force, int reflog, int quiet, enum branch_track track, int dry_run); /* * If the branch at 'refname' is currently checked out in a worktree, * then return the path to that worktree. */ const char *branch_checked_out(const char *refname); /* * Check if 'name' can be a valid name for a branch; die otherwise. * Return 1 if the named branch already exists; return 0 otherwise. * Fill ref with the full refname for the branch. */ int validate_branchname(const char *name, struct strbuf *ref); /* * Check if a branch 'name' can be created as a new branch; die otherwise. * 'force' can be used when it is OK for the named branch already exists. * Return 1 if the named branch already exists; return 0 otherwise. * Fill ref with the full refname for the branch. 
*/ int validate_new_branchname(const char *name, struct strbuf *ref, int force); /* * Remove information about the merge state on the current * branch. (E.g., MERGE_HEAD) */ void remove_merge_branch_state(struct repository *r); /* * Remove information about the state of working on the current * branch. (E.g., MERGE_HEAD) */ void remove_branch_state(struct repository *r, int verbose); /* * Configure local branch "local" as downstream to branch "remote" * from remote "origin". Used by git branch --set-upstream. * Returns 0 on success. */ #define BRANCH_CONFIG_VERBOSE 01 int install_branch_config(int flag, const char *local, const char *origin, const char *remote); /* * Read branch description */ int read_branch_desc(struct strbuf *, const char *branch_name); /* * Check if a branch is checked out in the main worktree or any linked * worktree and die (with a message describing its checkout location) if * it is. */ void die_if_checked_out(const char *branch, int ignore_current_worktree); #endif git-cinnabar-0.7.0/git-core/builtin/add.c000064400000000000000000000412661046102023000162520ustar 00000000000000/* * "git add" builtin command * * Copyright (C) 2006 Linus Torvalds */ #include "builtin.h" #include "advice.h" #include "config.h" #include "lockfile.h" #include "editor.h" #include "dir.h" #include "gettext.h" #include "pathspec.h" #include "run-command.h" #include "parse-options.h" #include "path.h" #include "preload-index.h" #include "diff.h" #include "read-cache.h" #include "revision.h" #include "bulk-checkin.h" #include "strvec.h" #include "submodule.h" #include "add-interactive.h" static const char * const builtin_add_usage[] = { N_("git add [] [--] ..."), NULL }; static int patch_interactive, add_interactive, edit_interactive; static int take_worktree_changes; static int add_renormalize; static int pathspec_file_nul; static int include_sparse; static const char *pathspec_from_file; static int chmod_pathspec(struct repository *repo, struct pathspec *pathspec, 
char flip, int show_only) { int ret = 0; for (size_t i = 0; i < repo->index->cache_nr; i++) { struct cache_entry *ce = repo->index->cache[i]; int err; if (!include_sparse && (ce_skip_worktree(ce) || !path_in_sparse_checkout(ce->name, repo->index))) continue; if (pathspec && !ce_path_match(repo->index, ce, pathspec, NULL)) continue; if (!show_only) err = chmod_index_entry(repo->index, ce, flip); else err = S_ISREG(ce->ce_mode) ? 0 : -1; if (err < 0) ret = error(_("cannot chmod %cx '%s'"), flip, ce->name); } return ret; } static int renormalize_tracked_files(struct repository *repo, const struct pathspec *pathspec, int flags) { int retval = 0; for (size_t i = 0; i < repo->index->cache_nr; i++) { struct cache_entry *ce = repo->index->cache[i]; if (!include_sparse && (ce_skip_worktree(ce) || !path_in_sparse_checkout(ce->name, repo->index))) continue; if (ce_stage(ce)) continue; /* do not touch unmerged paths */ if (!S_ISREG(ce->ce_mode) && !S_ISLNK(ce->ce_mode)) continue; /* do not touch non blobs */ if (pathspec && !ce_path_match(repo->index, ce, pathspec, NULL)) continue; retval |= add_file_to_index(repo->index, ce->name, flags | ADD_CACHE_RENORMALIZE); } return retval; } static char *prune_directory(struct repository *repo, struct dir_struct *dir, struct pathspec *pathspec, int prefix) { char *seen; int i; struct dir_entry **src, **dst; seen = xcalloc(pathspec->nr, 1); src = dst = dir->entries; i = dir->nr; while (--i >= 0) { struct dir_entry *entry = *src++; if (dir_path_match(repo->index, entry, pathspec, prefix, seen)) *dst++ = entry; } dir->nr = dst - dir->entries; add_pathspec_matches_against_index(pathspec, repo->index, seen, PS_IGNORE_SKIP_WORKTREE); return seen; } static int refresh(struct repository *repo, int verbose, const struct pathspec *pathspec) { char *seen; int i, ret = 0; char *skip_worktree_seen = NULL; struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP; unsigned int flags = REFRESH_IGNORE_SKIP_WORKTREE | (verbose ? 
REFRESH_IN_PORCELAIN : REFRESH_QUIET); seen = xcalloc(pathspec->nr, 1); refresh_index(repo->index, flags, pathspec, seen, _("Unstaged changes after refreshing the index:")); for (i = 0; i < pathspec->nr; i++) { if (!seen[i]) { const char *path = pathspec->items[i].original; if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) || !path_in_sparse_checkout(path, repo->index)) { string_list_append(&only_match_skip_worktree, pathspec->items[i].original); } else { die(_("pathspec '%s' did not match any files"), pathspec->items[i].original); } } } if (only_match_skip_worktree.nr) { advise_on_updating_sparse_paths(&only_match_skip_worktree); ret = 1; } free(seen); free(skip_worktree_seen); string_list_clear(&only_match_skip_worktree, 0); return ret; } int interactive_add(struct repository *repo, const char **argv, const char *prefix, int patch) { struct pathspec pathspec; int ret; parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_FULL | PATHSPEC_SYMLINK_LEADING_PATH | PATHSPEC_PREFIX_ORIGIN, prefix, argv); if (patch) ret = !!run_add_p(repo, ADD_P_ADD, NULL, &pathspec); else ret = !!run_add_i(repo, &pathspec); clear_pathspec(&pathspec); return ret; } static int edit_patch(struct repository *repo, int argc, const char **argv, const char *prefix) { char *file = repo_git_path(repo, "ADD_EDIT.patch"); struct child_process child = CHILD_PROCESS_INIT; struct rev_info rev; int out; struct stat st; repo_config(repo, git_diff_basic_config, NULL); if (repo_read_index(repo) < 0) die(_("could not read the index")); repo_init_revisions(repo, &rev, prefix); rev.diffopt.context = 7; argc = setup_revisions(argc, argv, &rev, NULL); rev.diffopt.output_format = DIFF_FORMAT_PATCH; rev.diffopt.use_color = 0; rev.diffopt.flags.ignore_dirty_submodules = 1; out = xopen(file, O_CREAT | O_WRONLY | O_TRUNC, 0666); rev.diffopt.file = xfdopen(out, "w"); rev.diffopt.close_file = 1; run_diff_files(&rev, 0); if (launch_editor(file, NULL, NULL)) die(_("editing patch failed")); if (stat(file, &st)) 
die_errno(_("could not stat '%s'"), file); if (!st.st_size) die(_("empty patch. aborted")); child.git_cmd = 1; strvec_pushl(&child.args, "apply", "--recount", "--cached", file, NULL); if (run_command(&child)) die(_("could not apply '%s'"), file); unlink(file); free(file); release_revisions(&rev); return 0; } static const char ignore_error[] = N_("The following paths are ignored by one of your .gitignore files:\n"); static int verbose, show_only, ignored_too, refresh_only; static int ignore_add_errors, intent_to_add, ignore_missing; static int warn_on_embedded_repo = 1; #define ADDREMOVE_DEFAULT 1 static int addremove = ADDREMOVE_DEFAULT; static int addremove_explicit = -1; /* unspecified */ static char *chmod_arg; static int ignore_removal_cb(const struct option *opt, const char *arg, int unset) { BUG_ON_OPT_ARG(arg); /* if we are told to ignore, we are not adding removals */ *(int *)opt->value = !unset ? 0 : 1; return 0; } static struct option builtin_add_options[] = { OPT__DRY_RUN(&show_only, N_("dry run")), OPT__VERBOSE(&verbose, N_("be verbose")), OPT_GROUP(""), OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")), OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")), OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")), OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0), OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")), OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")), OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")), OPT_BOOL('A', "all", &addremove_explicit, N_("add changes from all tracked and untracked files")), OPT_CALLBACK_F(0, "ignore-removal", &addremove_explicit, NULL /* takes no arguments */, N_("ignore paths removed in the working tree (same as --no-all)"), PARSE_OPT_NOARG, ignore_removal_cb), OPT_BOOL( 0 , "refresh", 
&refresh_only, N_("don't add, only refresh the index")), OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")), OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")), OPT_BOOL(0, "sparse", &include_sparse, N_("allow updating entries outside of the sparse-checkout cone")), OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x", N_("override the executable bit of the listed files")), OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo, N_("warn when adding an embedded repository")), OPT_PATHSPEC_FROM_FILE(&pathspec_from_file), OPT_PATHSPEC_FILE_NUL(&pathspec_file_nul), OPT_END(), }; static int add_config(const char *var, const char *value, const struct config_context *ctx, void *cb) { if (!strcmp(var, "add.ignoreerrors") || !strcmp(var, "add.ignore-errors")) { ignore_add_errors = git_config_bool(var, value); return 0; } if (git_color_config(var, value, cb) < 0) return -1; return git_default_config(var, value, ctx, cb); } static const char embedded_advice[] = N_( "You've added another git repository inside your current repository.\n" "Clones of the outer repository will not contain the contents of\n" "the embedded repository and will not know how to obtain it.\n" "If you meant to add a submodule, use:\n" "\n" " git submodule add %s\n" "\n" "If you added this path by mistake, you can remove it from the\n" "index with:\n" "\n" " git rm --cached %s\n" "\n" "See \"git help submodule\" for more information." 
); static void check_embedded_repo(const char *path) { struct strbuf name = STRBUF_INIT; static int adviced_on_embedded_repo = 0; if (!warn_on_embedded_repo) return; if (!ends_with(path, "/")) return; /* Drop trailing slash for aesthetics */ strbuf_addstr(&name, path); strbuf_strip_suffix(&name, "/"); warning(_("adding embedded git repository: %s"), name.buf); if (!adviced_on_embedded_repo) { advise_if_enabled(ADVICE_ADD_EMBEDDED_REPO, embedded_advice, name.buf, name.buf); adviced_on_embedded_repo = 1; } strbuf_release(&name); } static int add_files(struct repository *repo, struct dir_struct *dir, int flags) { int i, exit_status = 0; struct string_list matched_sparse_paths = STRING_LIST_INIT_NODUP; if (dir->ignored_nr) { fprintf(stderr, _(ignore_error)); for (i = 0; i < dir->ignored_nr; i++) fprintf(stderr, "%s\n", dir->ignored[i]->name); advise_if_enabled(ADVICE_ADD_IGNORED_FILE, _("Use -f if you really want to add them.")); exit_status = 1; } for (i = 0; i < dir->nr; i++) { if (!include_sparse && !path_in_sparse_checkout(dir->entries[i]->name, repo->index)) { string_list_append(&matched_sparse_paths, dir->entries[i]->name); continue; } if (add_file_to_index(repo->index, dir->entries[i]->name, flags)) { if (!ignore_add_errors) die(_("adding files failed")); exit_status = 1; } else { check_embedded_repo(dir->entries[i]->name); } } if (matched_sparse_paths.nr) { advise_on_updating_sparse_paths(&matched_sparse_paths); exit_status = 1; } string_list_clear(&matched_sparse_paths, 0); return exit_status; } int cmd_add(int argc, const char **argv, const char *prefix, struct repository *repo) { int exit_status = 0; struct pathspec pathspec; struct dir_struct dir = DIR_INIT; int flags; int add_new_files; int require_pathspec; char *seen = NULL; char *ps_matched = NULL; struct lock_file lock_file = LOCK_INIT; if (repo) repo_config(repo, add_config, NULL); argc = parse_options(argc, argv, prefix, builtin_add_options, builtin_add_usage, PARSE_OPT_KEEP_ARGV0); if 
(patch_interactive) add_interactive = 1; if (add_interactive) { if (show_only) die(_("options '%s' and '%s' cannot be used together"), "--dry-run", "--interactive/--patch"); if (pathspec_from_file) die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--interactive/--patch"); exit(interactive_add(repo, argv + 1, prefix, patch_interactive)); } if (edit_interactive) { if (pathspec_from_file) die(_("options '%s' and '%s' cannot be used together"), "--pathspec-from-file", "--edit"); return(edit_patch(repo, argc, argv, prefix)); } argc--; argv++; if (0 <= addremove_explicit) addremove = addremove_explicit; else if (take_worktree_changes && ADDREMOVE_DEFAULT) addremove = 0; /* "-u" was given but not "-A" */ if (addremove && take_worktree_changes) die(_("options '%s' and '%s' cannot be used together"), "-A", "-u"); if (!show_only && ignore_missing) die(_("the option '%s' requires '%s'"), "--ignore-missing", "--dry-run"); if (chmod_arg && ((chmod_arg[0] != '-' && chmod_arg[0] != '+') || chmod_arg[1] != 'x' || chmod_arg[2])) die(_("--chmod param '%s' must be either -x or +x"), chmod_arg); add_new_files = !take_worktree_changes && !refresh_only && !add_renormalize; require_pathspec = !(take_worktree_changes || (0 < addremove_explicit)); prepare_repo_settings(repo); repo->settings.command_requires_full_index = 0; repo_hold_locked_index(repo, &lock_file, LOCK_DIE_ON_ERROR); /* * Check the "pathspec '%s' did not match any files" block * below before enabling new magic. 
*/ parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_FULL | PATHSPEC_SYMLINK_LEADING_PATH, prefix, argv); if (pathspec_from_file) { if (pathspec.nr) die(_("'%s' and pathspec arguments cannot be used together"), "--pathspec-from-file"); parse_pathspec_file(&pathspec, 0, PATHSPEC_PREFER_FULL | PATHSPEC_SYMLINK_LEADING_PATH, prefix, pathspec_from_file, pathspec_file_nul); } else if (pathspec_file_nul) { die(_("the option '%s' requires '%s'"), "--pathspec-file-nul", "--pathspec-from-file"); } if (require_pathspec && pathspec.nr == 0) { fprintf(stderr, _("Nothing specified, nothing added.\n")); advise_if_enabled(ADVICE_ADD_EMPTY_PATHSPEC, _("Maybe you wanted to say 'git add .'?")); return 0; } if (!take_worktree_changes && addremove_explicit < 0 && pathspec.nr) /* Turn "git add pathspec..." to "git add -A pathspec..." */ addremove = 1; flags = ((verbose ? ADD_CACHE_VERBOSE : 0) | (show_only ? ADD_CACHE_PRETEND : 0) | (intent_to_add ? ADD_CACHE_INTENT : 0) | (ignore_add_errors ? ADD_CACHE_IGNORE_ERRORS : 0) | (!(addremove || take_worktree_changes) ? 
ADD_CACHE_IGNORE_REMOVAL : 0)); if (repo_read_index_preload(repo, &pathspec, 0) < 0) die(_("index file corrupt")); die_in_unpopulated_submodule(repo->index, prefix); die_path_inside_submodule(repo->index, &pathspec); enable_fscache(0); /* We do not really re-read the index but update the up-to-date flags */ preload_index(repo->index, &pathspec, 0); if (add_new_files) { int baselen; /* Set up the default git porcelain excludes */ if (!ignored_too) { dir.flags |= DIR_COLLECT_IGNORED; setup_standard_excludes(&dir); } /* This picks up the paths that are not tracked */ baselen = fill_directory(&dir, repo->index, &pathspec); if (pathspec.nr) seen = prune_directory(repo, &dir, &pathspec, baselen); } if (refresh_only) { exit_status |= refresh(repo, verbose, &pathspec); goto finish; } if (pathspec.nr) { int i; char *skip_worktree_seen = NULL; struct string_list only_match_skip_worktree = STRING_LIST_INIT_NODUP; if (!seen) seen = find_pathspecs_matching_against_index(&pathspec, repo->index, PS_IGNORE_SKIP_WORKTREE); /* * file_exists() assumes exact match */ GUARD_PATHSPEC(&pathspec, PATHSPEC_FROMTOP | PATHSPEC_LITERAL | PATHSPEC_GLOB | PATHSPEC_ICASE | PATHSPEC_EXCLUDE | PATHSPEC_ATTR); for (i = 0; i < pathspec.nr; i++) { const char *path = pathspec.items[i].match; if (pathspec.items[i].magic & PATHSPEC_EXCLUDE) continue; if (seen[i]) continue; if (!include_sparse && matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) { string_list_append(&only_match_skip_worktree, pathspec.items[i].original); continue; } /* Don't complain at 'git add .' 
on empty repo */ if (!path[0]) continue; if ((pathspec.items[i].magic & (PATHSPEC_GLOB | PATHSPEC_ICASE)) || !file_exists(path)) { if (ignore_missing) { int dtype = DT_UNKNOWN; if (is_excluded(&dir, repo->index, path, &dtype)) dir_add_ignored(&dir, repo->index, path, pathspec.items[i].len); } else die(_("pathspec '%s' did not match any files"), pathspec.items[i].original); } } if (only_match_skip_worktree.nr) { advise_on_updating_sparse_paths(&only_match_skip_worktree); exit_status = 1; } free(seen); free(skip_worktree_seen); string_list_clear(&only_match_skip_worktree, 0); } begin_odb_transaction(); ps_matched = xcalloc(pathspec.nr, 1); if (add_renormalize) exit_status |= renormalize_tracked_files(repo, &pathspec, flags); else exit_status |= add_files_to_cache(repo, prefix, &pathspec, ps_matched, include_sparse, flags); if (take_worktree_changes && !add_renormalize && !ignore_add_errors && report_path_error(ps_matched, &pathspec)) exit(128); if (add_new_files) exit_status |= add_files(repo, &dir, flags); if (chmod_arg && pathspec.nr) exit_status |= chmod_pathspec(repo, &pathspec, chmod_arg[0], show_only); end_odb_transaction(); finish: if (write_locked_index(repo->index, &lock_file, COMMIT_LOCK | SKIP_IF_UNCHANGED)) die(_("unable to write new index file")); free(ps_matched); dir_clear(&dir); clear_pathspec(&pathspec); enable_fscache(0); return exit_status; } git-cinnabar-0.7.0/git-core/builtin/am.c000064400000000000000000002025241046102023000161130ustar 00000000000000/* * Builtin "git am" * * Based on git-am.sh by Junio C Hamano. 
*/ #define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "abspath.h" #include "advice.h" #include "config.h" #include "editor.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "parse-options.h" #include "dir.h" #include "run-command.h" #include "hook.h" #include "quote.h" #include "tempfile.h" #include "lockfile.h" #include "cache-tree.h" #include "refs.h" #include "commit.h" #include "diff.h" #include "unpack-trees.h" #include "branch.h" #include "object-name.h" #include "preload-index.h" #include "sequencer.h" #include "revision.h" #include "merge-recursive.h" #include "log-tree.h" #include "notes-utils.h" #include "rerere.h" #include "mailinfo.h" #include "apply.h" #include "string-list.h" #include "pager.h" #include "path.h" #include "pretty.h" /** * Returns the length of the first line of msg. */ static int linelen(const char *msg) { return strchrnul(msg, '\n') - msg; } /** * Returns true if `str` consists of only whitespace, false otherwise. */ static int str_isspace(const char *str) { for (; *str; str++) if (!isspace(*str)) return 0; return 1; } enum patch_format { PATCH_FORMAT_UNKNOWN = 0, PATCH_FORMAT_MBOX, PATCH_FORMAT_STGIT, PATCH_FORMAT_STGIT_SERIES, PATCH_FORMAT_HG, PATCH_FORMAT_MBOXRD }; enum keep_type { KEEP_FALSE = 0, KEEP_TRUE, /* pass -k flag to git-mailinfo */ KEEP_NON_PATCH /* pass -b flag to git-mailinfo */ }; enum scissors_type { SCISSORS_UNSET = -1, SCISSORS_FALSE = 0, /* pass --no-scissors to git-mailinfo */ SCISSORS_TRUE /* pass --scissors to git-mailinfo */ }; enum signoff_type { SIGNOFF_FALSE = 0, SIGNOFF_TRUE = 1, SIGNOFF_EXPLICIT /* --signoff was set on the command-line */ }; enum resume_type { RESUME_FALSE = 0, RESUME_APPLY, RESUME_RESOLVED, RESUME_SKIP, RESUME_ABORT, RESUME_QUIT, RESUME_SHOW_PATCH_RAW, RESUME_SHOW_PATCH_DIFF, RESUME_ALLOW_EMPTY, }; enum empty_action { STOP_ON_EMPTY_COMMIT = 0, /* output errors and stop in the middle of an am session */ DROP_EMPTY_COMMIT, /* skip with a notice 
message, unless "--quiet" has been passed */ KEEP_EMPTY_COMMIT, /* keep recording as empty commits */ }; struct am_state { /* state directory path */ char *dir; /* current and last patch numbers, 1-indexed */ int cur; int last; /* commit metadata and message */ char *author_name; char *author_email; char *author_date; char *msg; size_t msg_len; /* when --rebasing, records the original commit the patch came from */ struct object_id orig_commit; /* number of digits in patch filename */ int prec; /* various operating modes and command line options */ int interactive; int no_verify; int threeway; int quiet; int signoff; /* enum signoff_type */ int utf8; int keep; /* enum keep_type */ int message_id; int scissors; /* enum scissors_type */ int quoted_cr; /* enum quoted_cr_action */ int empty_type; /* enum empty_action */ struct strvec git_apply_opts; const char *resolvemsg; int committer_date_is_author_date; int ignore_date; int allow_rerere_autoupdate; const char *sign_commit; int rebasing; }; /** * Initializes am_state with the default values. */ static void am_state_init(struct am_state *state) { int gpgsign; memset(state, 0, sizeof(*state)); state->dir = git_pathdup("rebase-apply"); state->prec = 4; git_config_get_bool("am.threeway", &state->threeway); state->utf8 = 1; git_config_get_bool("am.messageid", &state->message_id); state->scissors = SCISSORS_UNSET; state->quoted_cr = quoted_cr_unset; strvec_init(&state->git_apply_opts); if (!git_config_get_bool("commit.gpgsign", &gpgsign)) state->sign_commit = gpgsign ? "" : NULL; } /** * Releases memory allocated by an am_state. 
*/ static void am_state_release(struct am_state *state) { free(state->dir); free(state->author_name); free(state->author_email); free(state->author_date); free(state->msg); strvec_clear(&state->git_apply_opts); } static int am_option_parse_quoted_cr(const struct option *opt, const char *arg, int unset) { BUG_ON_OPT_NEG(unset); if (mailinfo_parse_quoted_cr_action(arg, opt->value) != 0) return error(_("bad action '%s' for '%s'"), arg, "--quoted-cr"); return 0; } static int am_option_parse_empty(const struct option *opt, const char *arg, int unset) { int *opt_value = opt->value; BUG_ON_OPT_NEG(unset); if (!strcmp(arg, "stop")) *opt_value = STOP_ON_EMPTY_COMMIT; else if (!strcmp(arg, "drop")) *opt_value = DROP_EMPTY_COMMIT; else if (!strcmp(arg, "keep")) *opt_value = KEEP_EMPTY_COMMIT; else return error(_("invalid value for '%s': '%s'"), "--empty", arg); return 0; } /** * Returns path relative to the am_state directory. */ static inline const char *am_path(const struct am_state *state, const char *path) { return mkpath("%s/%s", state->dir, path); } /** * For convenience to call write_file() */ static void write_state_text(const struct am_state *state, const char *name, const char *string) { write_file(am_path(state, name), "%s", string); } static void write_state_count(const struct am_state *state, const char *name, int value) { write_file(am_path(state, name), "%d", value); } static void write_state_bool(const struct am_state *state, const char *name, int value) { write_state_text(state, name, value ? "t" : "f"); } /** * If state->quiet is false, calls fprintf(fp, fmt, ...), and appends a newline * at the end. */ __attribute__((format (printf, 3, 4))) static void say(const struct am_state *state, FILE *fp, const char *fmt, ...) { va_list ap; va_start(ap, fmt); if (!state->quiet) { vfprintf(fp, fmt, ap); putc('\n', fp); } va_end(ap); } /** * Returns 1 if there is an am session in progress, 0 otherwise. 
*/ static int am_in_progress(const struct am_state *state) { struct stat st; if (lstat(state->dir, &st) < 0 || !S_ISDIR(st.st_mode)) return 0; if (lstat(am_path(state, "last"), &st) || !S_ISREG(st.st_mode)) return 0; if (lstat(am_path(state, "next"), &st) || !S_ISREG(st.st_mode)) return 0; return 1; } /** * Reads the contents of `file` in the `state` directory into `sb`. Returns the * number of bytes read on success, -1 if the file does not exist. If `trim` is * set, trailing whitespace will be removed. */ static int read_state_file(struct strbuf *sb, const struct am_state *state, const char *file, int trim) { strbuf_reset(sb); if (strbuf_read_file(sb, am_path(state, file), 0) >= 0) { if (trim) strbuf_trim(sb); return sb->len; } if (errno == ENOENT) return -1; die_errno(_("could not read '%s'"), am_path(state, file)); } /** * Reads and parses the state directory's "author-script" file, and sets * state->author_name, state->author_email and state->author_date accordingly. * Returns 0 on success, -1 if the file could not be parsed. * * The author script is of the format: * * GIT_AUTHOR_NAME='$author_name' * GIT_AUTHOR_EMAIL='$author_email' * GIT_AUTHOR_DATE='$author_date' * * where $author_name, $author_email and $author_date are quoted. We are strict * with our parsing, as the file was meant to be eval'd in the old git-am.sh * script, and thus if the file differs from what this function expects, it is * better to bail out than to do something that the user does not expect. */ static int read_am_author_script(struct am_state *state) { const char *filename = am_path(state, "author-script"); assert(!state->author_name); assert(!state->author_email); assert(!state->author_date); return read_author_script(filename, &state->author_name, &state->author_email, &state->author_date, 1); } /** * Saves state->author_name, state->author_email and state->author_date in the * state directory's "author-script" file. 
*/ static void write_author_script(const struct am_state *state) { struct strbuf sb = STRBUF_INIT; strbuf_addstr(&sb, "GIT_AUTHOR_NAME="); sq_quote_buf(&sb, state->author_name); strbuf_addch(&sb, '\n'); strbuf_addstr(&sb, "GIT_AUTHOR_EMAIL="); sq_quote_buf(&sb, state->author_email); strbuf_addch(&sb, '\n'); strbuf_addstr(&sb, "GIT_AUTHOR_DATE="); sq_quote_buf(&sb, state->author_date); strbuf_addch(&sb, '\n'); write_state_text(state, "author-script", sb.buf); strbuf_release(&sb); } /** * Reads the commit message from the state directory's "final-commit" file, * setting state->msg to its contents and state->msg_len to the length of its * contents in bytes. * * Returns 0 on success, -1 if the file does not exist. */ static int read_commit_msg(struct am_state *state) { struct strbuf sb = STRBUF_INIT; assert(!state->msg); if (read_state_file(&sb, state, "final-commit", 0) < 0) { strbuf_release(&sb); return -1; } state->msg = strbuf_detach(&sb, &state->msg_len); return 0; } /** * Saves state->msg in the state directory's "final-commit" file. */ static void write_commit_msg(const struct am_state *state) { const char *filename = am_path(state, "final-commit"); write_file_buf(filename, state->msg, state->msg_len); } /** * Loads state from disk. 
*/
static void am_load(struct am_state *state)
{
	struct strbuf sb = STRBUF_INIT;

	/* "next" and "last" existing is what defines an in-progress session */
	if (read_state_file(&sb, state, "next", 1) < 0)
		BUG("state file 'next' does not exist");
	state->cur = strtol(sb.buf, NULL, 10);

	if (read_state_file(&sb, state, "last", 1) < 0)
		BUG("state file 'last' does not exist");
	state->last = strtol(sb.buf, NULL, 10);

	if (read_am_author_script(state) < 0)
		die(_("could not parse author script"));

	/* "final-commit" may legitimately be absent; ignore failure */
	read_commit_msg(state);

	if (read_state_file(&sb, state, "original-commit", 1) < 0)
		oidclr(&state->orig_commit, the_repository->hash_algo);
	else if (get_oid_hex(sb.buf, &state->orig_commit) < 0)
		die(_("could not parse %s"), am_path(state, "original-commit"));

	/* boolean state files hold "t" for true (see write_state_bool()) */
	read_state_file(&sb, state, "threeway", 1);
	state->threeway = !strcmp(sb.buf, "t");

	read_state_file(&sb, state, "quiet", 1);
	state->quiet = !strcmp(sb.buf, "t");

	read_state_file(&sb, state, "sign", 1);
	state->signoff = !strcmp(sb.buf, "t");

	read_state_file(&sb, state, "utf8", 1);
	state->utf8 = !strcmp(sb.buf, "t");

	if (file_exists(am_path(state, "rerere-autoupdate"))) {
		read_state_file(&sb, state, "rerere-autoupdate", 1);
		state->allow_rerere_autoupdate = strcmp(sb.buf, "t") ?
			RERERE_NOAUTOUPDATE : RERERE_AUTOUPDATE;
	} else {
		state->allow_rerere_autoupdate = 0;
	}

	/* tri-state: "t" = true, "b" = keep non-patch brackets, else false */
	read_state_file(&sb, state, "keep", 1);
	if (!strcmp(sb.buf, "t"))
		state->keep = KEEP_TRUE;
	else if (!strcmp(sb.buf, "b"))
		state->keep = KEEP_NON_PATCH;
	else
		state->keep = KEEP_FALSE;

	read_state_file(&sb, state, "messageid", 1);
	state->message_id = !strcmp(sb.buf, "t");

	/* tri-state: "t" / "f" / empty (unset) */
	read_state_file(&sb, state, "scissors", 1);
	if (!strcmp(sb.buf, "t"))
		state->scissors = SCISSORS_TRUE;
	else if (!strcmp(sb.buf, "f"))
		state->scissors = SCISSORS_FALSE;
	else
		state->scissors = SCISSORS_UNSET;

	read_state_file(&sb, state, "quoted-cr", 1);
	if (!*sb.buf)
		state->quoted_cr = quoted_cr_unset;
	else if (mailinfo_parse_quoted_cr_action(sb.buf, &state->quoted_cr) != 0)
		die(_("could not parse %s"), am_path(state, "quoted-cr"));

	/* "apply-opt" stores the sq-quoted argv saved by am_setup() */
	read_state_file(&sb, state, "apply-opt", 1);
	strvec_clear(&state->git_apply_opts);
	if (sq_dequote_to_strvec(sb.buf, &state->git_apply_opts) < 0)
		die(_("could not parse %s"), am_path(state, "apply-opt"));

	state->rebasing = !!file_exists(am_path(state, "rebasing"));

	strbuf_release(&sb);
}

/**
 * Removes the am_state directory, forcefully terminating the current am
 * session.
 */
static void am_destroy(const struct am_state *state)
{
	struct strbuf sb = STRBUF_INIT;

	strbuf_addstr(&sb, state->dir);
	remove_dir_recursively(&sb, 0);
	strbuf_release(&sb);
}

/**
 * Runs applypatch-msg hook. Returns its exit code.
 */
static int run_applypatch_msg_hook(struct am_state *state)
{
	int ret = 0;

	assert(state->msg);

	if (!state->no_verify)
		ret = run_hooks_l(the_repository, "applypatch-msg",
				  am_path(state, "final-commit"), NULL);

	if (!ret) {
		/* the hook may have edited the message; re-read it */
		FREE_AND_NULL(state->msg);
		if (read_commit_msg(state) < 0)
			die(_("'%s' was deleted by the applypatch-msg hook"),
				am_path(state, "final-commit"));
	}

	return ret;
}

/**
 * Runs post-rewrite hook. Returns its exit code.
*/
static int run_post_rewrite_hook(const struct am_state *state)
{
	struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;

	strvec_push(&opt.args, "rebase");
	/* feed the "<old-oid> <new-oid>" pairs recorded during commits on stdin */
	opt.path_to_stdin = am_path(state, "rewritten");

	return run_hooks_opt(the_repository, "post-rewrite", &opt);
}

/**
 * Reads the state directory's "rewritten" file, and copies notes from the old
 * commits listed in the file to their rewritten commits.
 *
 * Returns 0 on success, -1 on failure.
 */
static int copy_notes_for_rebase(const struct am_state *state)
{
	struct notes_rewrite_cfg *c;
	struct strbuf sb = STRBUF_INIT;
	const char *invalid_line = _("Malformed input line: '%s'.");
	const char *msg = "Notes added by 'git rebase'";
	FILE *fp;
	int ret = 0;

	assert(state->rebasing);

	c = init_copy_notes_for_rewrite("rebase");
	if (!c)
		return 0;

	fp = xfopen(am_path(state, "rewritten"), "r");

	while (!strbuf_getline_lf(&sb, fp)) {
		struct object_id from_obj, to_obj;
		const char *p;

		/* each line must be exactly "<old-oid> <new-oid>" */
		if (sb.len != the_hash_algo->hexsz * 2 + 1) {
			ret = error(invalid_line, sb.buf);
			goto finish;
		}

		if (parse_oid_hex(sb.buf, &from_obj, &p)) {
			ret = error(invalid_line, sb.buf);
			goto finish;
		}

		if (*p != ' ') {
			ret = error(invalid_line, sb.buf);
			goto finish;
		}

		if (get_oid_hex(p + 1, &to_obj)) {
			ret = error(invalid_line, sb.buf);
			goto finish;
		}

		if (copy_note_for_rewrite(c, &from_obj, &to_obj))
			ret = error(_("Failed to copy notes from '%s' to '%s'"),
					oid_to_hex(&from_obj), oid_to_hex(&to_obj));
	}

finish:
	/* commit whatever note copies succeeded even on a malformed line */
	finish_copy_notes_for_rewrite(the_repository, c, msg);
	fclose(fp);
	strbuf_release(&sb);
	return ret;
}

/**
 * Determines if the file looks like a piece of RFC2822 mail by grabbing all
 * non-indented lines and checking if they look like they begin with valid
 * header field names.
 *
 * Returns 1 if the file looks like a piece of mail, 0 otherwise.
*/
static int is_mail(FILE *fp)
{
	/* a header field name: printable US-ASCII except ':' (RFC2822), then ':' */
	const char *header_regex = "^[!-9;-~]+:";
	struct strbuf sb = STRBUF_INIT;
	regex_t regex;
	int ret = 1;

	if (fseek(fp, 0L, SEEK_SET))
		die_errno(_("fseek failed"));

	if (regcomp(&regex, header_regex, REG_NOSUB | REG_EXTENDED))
		die("invalid pattern: %s", header_regex);

	while (!strbuf_getline(&sb, fp)) {
		if (!sb.len)
			break; /* End of header */

		/* Ignore indented folded lines */
		if (*sb.buf == '\t' || *sb.buf == ' ')
			continue;

		/* It's a header if it matches header_regex */
		if (regexec(&regex, sb.buf, 0, NULL, 0)) {
			ret = 0;
			goto done;
		}
	}

done:
	regfree(&regex);
	strbuf_release(&sb);
	return ret;
}

/**
 * Attempts to detect the patch_format of the patches contained in `paths`,
 * returning the PATCH_FORMAT_* enum value. Returns PATCH_FORMAT_UNKNOWN if
 * detection fails.
 */
static int detect_patch_format(const char **paths)
{
	enum patch_format ret = PATCH_FORMAT_UNKNOWN;
	struct strbuf l1 = STRBUF_INIT;
	struct strbuf l2 = STRBUF_INIT;
	struct strbuf l3 = STRBUF_INIT;
	FILE *fp;

	/*
	 * We default to mbox format if input is from stdin and for directories
	 */
	if (!*paths || !strcmp(*paths, "-") || is_directory(*paths))
		return PATCH_FORMAT_MBOX;

	/*
	 * Otherwise, check the first few lines of the first patch, starting
	 * from the first non-blank line, to try to detect its format.
	 */

	fp = xfopen(*paths, "r");

	while (!strbuf_getline(&l1, fp)) {
		if (l1.len)
			break;
	}

	if (starts_with(l1.buf, "From ") || starts_with(l1.buf, "From: ")) {
		ret = PATCH_FORMAT_MBOX;
		goto done;
	}

	if (starts_with(l1.buf, "# This series applies on GIT commit")) {
		ret = PATCH_FORMAT_STGIT_SERIES;
		goto done;
	}

	if (!strcmp(l1.buf, "# HG changeset patch")) {
		ret = PATCH_FORMAT_HG;
		goto done;
	}

	strbuf_getline(&l2, fp);
	strbuf_getline(&l3, fp);

	/*
	 * If the second line is empty and the third is a From, Author or Date
	 * entry, this is likely an StGit patch.
	 */
	if (l1.len && !l2.len &&
	    (starts_with(l3.buf, "From:") ||
	     starts_with(l3.buf, "Author:") ||
	     starts_with(l3.buf, "Date:"))) {
		ret = PATCH_FORMAT_STGIT;
		goto done;
	}

	/* last resort: scan the rest of the file for RFC2822-ish headers */
	if (l1.len && is_mail(fp)) {
		ret = PATCH_FORMAT_MBOX;
		goto done;
	}

done:
	fclose(fp);
	strbuf_release(&l1);
	strbuf_release(&l2);
	strbuf_release(&l3);
	return ret;
}

/**
 * Splits out individual email patches from `paths`, where each path is either
 * a mbox file or a Maildir. Returns 0 on success, -1 on failure.
 */
static int split_mail_mbox(struct am_state *state, const char **paths,
			   int keep_cr, int mboxrd)
{
	struct child_process cp = CHILD_PROCESS_INIT;
	struct strbuf last = STRBUF_INIT;
	int ret;

	cp.git_cmd = 1;
	strvec_push(&cp.args, "mailsplit");
	strvec_pushf(&cp.args, "-d%d", state->prec);
	strvec_pushf(&cp.args, "-o%s", state->dir);
	strvec_push(&cp.args, "-b");
	if (keep_cr)
		strvec_push(&cp.args, "--keep-cr");
	if (mboxrd)
		strvec_push(&cp.args, "--mboxrd");
	strvec_push(&cp.args, "--");
	strvec_pushv(&cp.args, paths);

	/* keep only the tail of mailsplit's output: the message count */
	ret = capture_command(&cp, &last, 8);
	if (ret)
		goto exit;

	state->cur = 1;
	state->last = strtol(last.buf, NULL, 10);

exit:
	strbuf_release(&last);
	return ret ? -1 : 0;
}

/**
 * Callback signature for split_mail_conv(). The foreign patch should be
 * read from `in`, and the converted patch (in RFC2822 mail format) should be
 * written to `out`. Return 0 on success, or -1 on failure.
 */
typedef int (*mail_conv_fn)(FILE *out, FILE *in, int keep_cr);

/**
 * Calls `fn` for each file in `paths` to convert the foreign patch to the
 * RFC2822 mail format suitable for parsing with git-mailinfo.
 *
 * Returns 0 on success, -1 on failure.
*/
static int split_mail_conv(mail_conv_fn fn, struct am_state *state,
			   const char **paths, int keep_cr)
{
	static const char *stdin_only[] = {"-", NULL};
	int i;

	if (!*paths)
		paths = stdin_only;

	for (i = 0; *paths; paths++, i++) {
		FILE *in, *out;
		const char *mail;
		int ret;

		if (!strcmp(*paths, "-"))
			in = stdin;
		else
			in = fopen(*paths, "r");

		if (!in)
			return error_errno(_("could not open '%s' for reading"),
					   *paths);

		/* queue files are 1-based, zero-padded to state->prec digits */
		mail = mkpath("%s/%0*d", state->dir, state->prec, i + 1);

		out = fopen(mail, "w");
		if (!out) {
			if (in != stdin)
				fclose(in);
			return error_errno(_("could not open '%s' for writing"),
					   mail);
		}

		ret = fn(out, in, keep_cr);

		fclose(out);
		if (in != stdin)
			fclose(in);

		if (ret)
			return error(_("could not parse patch '%s'"), *paths);
	}

	state->cur = 1;
	state->last = i;
	return 0;
}

/**
 * A split_mail_conv() callback that converts an StGit patch to an RFC2822
 * message suitable for parsing with git-mailinfo.
 */
static int stgit_patch_to_mail(FILE *out, FILE *in, int keep_cr UNUSED)
{
	struct strbuf sb = STRBUF_INIT;
	int subject_printed = 0;

	while (!strbuf_getline_lf(&sb, in)) {
		const char *str;

		if (str_isspace(sb.buf))
			continue;
		else if (skip_prefix(sb.buf, "Author:", &str))
			fprintf(out, "From:%s\n", str);
		else if (starts_with(sb.buf, "From") || starts_with(sb.buf, "Date"))
			fprintf(out, "%s\n", sb.buf);
		else if (!subject_printed) {
			/* first non-header line becomes the Subject */
			fprintf(out, "Subject: %s\n", sb.buf);
			subject_printed = 1;
		} else {
			fprintf(out, "\n%s\n", sb.buf);
			break;
		}
	}

	strbuf_reset(&sb);
	/* pass the rest of the patch through verbatim */
	while (strbuf_fread(&sb, 8192, in) > 0) {
		fwrite(sb.buf, 1, sb.len, out);
		strbuf_reset(&sb);
	}

	strbuf_release(&sb);
	return 0;
}

/**
 * This function only supports a single StGit series file in `paths`.
 *
 * Given an StGit series file, converts the StGit patches in the series into
 * RFC2822 messages suitable for parsing with git-mailinfo, and queues them in
 * the state directory.
 *
 * Returns 0 on success, -1 on failure.
*/
static int split_mail_stgit_series(struct am_state *state, const char **paths,
				   int keep_cr)
{
	const char *series_dir;
	char *series_dir_buf;
	FILE *fp;
	struct strvec patches = STRVEC_INIT;
	struct strbuf sb = STRBUF_INIT;
	int ret;

	if (!paths[0] || paths[1])
		return error(_("Only one StGIT patch series can be applied at once"));

	series_dir_buf = xstrdup(*paths);
	series_dir = dirname(series_dir_buf);

	fp = fopen(*paths, "r");
	if (!fp)
		return error_errno(_("could not open '%s' for reading"), *paths);

	while (!strbuf_getline_lf(&sb, fp)) {
		if (*sb.buf == '#')
			continue; /* skip comment lines */

		/* patch names in the series are relative to the series file */
		strvec_push(&patches, mkpath("%s/%s", series_dir, sb.buf));
	}

	fclose(fp);
	strbuf_release(&sb);
	free(series_dir_buf);

	ret = split_mail_conv(stgit_patch_to_mail, state, patches.v, keep_cr);

	strvec_clear(&patches);
	return ret;
}

/**
 * A split_patches_conv() callback that converts a mercurial patch to a RFC2822
 * message suitable for parsing with git-mailinfo.
 */
static int hg_patch_to_mail(FILE *out, FILE *in, int keep_cr UNUSED)
{
	struct strbuf sb = STRBUF_INIT;
	int rc = 0;

	while (!strbuf_getline_lf(&sb, in)) {
		const char *str;

		if (skip_prefix(sb.buf, "# User ", &str))
			fprintf(out, "From: %s\n", str);
		else if (skip_prefix(sb.buf, "# Date ", &str)) {
			/* hg header: "# Date <epoch-seconds> <tz-offset>" */
			timestamp_t timestamp;
			long tz, tz2;
			char *end;

			errno = 0;
			timestamp = parse_timestamp(str, &end, 10);
			if (errno) {
				rc = error(_("invalid timestamp"));
				goto exit;
			}

			if (!skip_prefix(end, " ", &str)) {
				rc = error(_("invalid Date line"));
				goto exit;
			}

			errno = 0;
			tz = strtol(str, &end, 10);
			if (errno) {
				rc = error(_("invalid timezone offset"));
				goto exit;
			}

			if (*end) {
				rc = error(_("invalid Date line"));
				goto exit;
			}

			/*
			 * mercurial's timezone is in seconds west of UTC,
			 * however git's timezone is in hours + minutes east of
			 * UTC. Convert it.
			 */
			tz2 = labs(tz) / 3600 * 100 + labs(tz) % 3600 / 60;
			if (tz > 0)
				tz2 = -tz2;

			fprintf(out, "Date: %s\n",
				show_date(timestamp, tz2, DATE_MODE(RFC2822)));
		} else if (starts_with(sb.buf, "# ")) {
			continue; /* drop other "# ..." hg metadata lines */
		} else {
			fprintf(out, "\n%s\n", sb.buf);
			break;
		}
	}

	strbuf_reset(&sb);
	/* the remainder is the patch body; copy it through unchanged */
	while (strbuf_fread(&sb, 8192, in) > 0) {
		fwrite(sb.buf, 1, sb.len, out);
		strbuf_reset(&sb);
	}

exit:
	strbuf_release(&sb);
	return rc;
}

/**
 * Splits a list of files/directories into individual email patches. Each path
 * in `paths` must be a file/directory that is formatted according to
 * `patch_format`.
 *
 * Once split out, the individual email patches will be stored in the state
 * directory, with each patch's filename being its index, padded to state->prec
 * digits.
 *
 * state->cur will be set to the index of the first mail, and state->last will
 * be set to the index of the last mail.
 *
 * Set keep_cr to 0 to convert all lines ending with \r\n to end with \n, 1
 * to disable this behavior, -1 to use the default configured setting.
 *
 * Returns 0 on success, -1 on failure.
*/
static int split_mail(struct am_state *state, enum patch_format patch_format,
		      const char **paths, int keep_cr)
{
	if (keep_cr < 0) {
		/* -1 means "use am.keepcr config", defaulting to false */
		keep_cr = 0;
		git_config_get_bool("am.keepcr", &keep_cr);
	}

	switch (patch_format) {
	case PATCH_FORMAT_MBOX:
		return split_mail_mbox(state, paths, keep_cr, 0);
	case PATCH_FORMAT_STGIT:
		return split_mail_conv(stgit_patch_to_mail, state, paths, keep_cr);
	case PATCH_FORMAT_STGIT_SERIES:
		return split_mail_stgit_series(state, paths, keep_cr);
	case PATCH_FORMAT_HG:
		return split_mail_conv(hg_patch_to_mail, state, paths, keep_cr);
	case PATCH_FORMAT_MBOXRD:
		return split_mail_mbox(state, paths, keep_cr, 1);
	default:
		BUG("invalid patch_format");
	}
	return -1;
}

/**
 * Setup a new am session for applying patches
 */
static void am_setup(struct am_state *state, enum patch_format patch_format,
		     const char **paths, int keep_cr)
{
	struct object_id curr_head;
	const char *str;
	struct strbuf sb = STRBUF_INIT;

	if (!patch_format)
		patch_format = detect_patch_format(paths);

	if (!patch_format) {
		fprintf_ln(stderr, _("Patch format detection failed."));
		exit(128);
	}

	if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
		die_errno(_("failed to create directory '%s'"), state->dir);
	refs_delete_ref(get_main_ref_store(the_repository), NULL, "REBASE_HEAD",
			NULL, REF_NO_DEREF);

	if (split_mail(state, patch_format, paths, keep_cr) < 0) {
		am_destroy(state);
		die(_("Failed to split patches."));
	}

	if (state->rebasing)
		state->threeway = 1;

	/* persist the session options so am_load() can restore them */
	write_state_bool(state, "threeway", state->threeway);
	write_state_bool(state, "quiet", state->quiet);
	write_state_bool(state, "sign", state->signoff);
	write_state_bool(state, "utf8", state->utf8);

	if (state->allow_rerere_autoupdate)
		write_state_bool(state, "rerere-autoupdate",
			 state->allow_rerere_autoupdate == RERERE_AUTOUPDATE);

	switch (state->keep) {
	case KEEP_FALSE:
		str = "f";
		break;
	case KEEP_TRUE:
		str = "t";
		break;
	case KEEP_NON_PATCH:
		str = "b";
		break;
	default:
		BUG("invalid value for state->keep");
	}

	write_state_text(state, "keep", str);
	write_state_bool(state, "messageid", state->message_id);

	switch (state->scissors) {
	case SCISSORS_UNSET:
		str = "";
		break;
	case SCISSORS_FALSE:
		str = "f";
		break;
	case SCISSORS_TRUE:
		str = "t";
		break;
	default:
		BUG("invalid value for state->scissors");
	}
	write_state_text(state, "scissors", str);

	switch (state->quoted_cr) {
	case quoted_cr_unset:
		str = "";
		break;
	case quoted_cr_nowarn:
		str = "nowarn";
		break;
	case quoted_cr_warn:
		str = "warn";
		break;
	case quoted_cr_strip:
		str = "strip";
		break;
	default:
		BUG("invalid value for state->quoted_cr");
	}
	write_state_text(state, "quoted-cr", str);

	/* sq-quoted so am_load() can sq_dequote_to_strvec() it back */
	sq_quote_argv(&sb, state->git_apply_opts.v);
	write_state_text(state, "apply-opt", sb.buf);

	if (state->rebasing)
		write_state_text(state, "rebasing", "");
	else
		write_state_text(state, "applying", "");

	if (!repo_get_oid(the_repository, "HEAD", &curr_head)) {
		write_state_text(state, "abort-safety", oid_to_hex(&curr_head));
		if (!state->rebasing)
			refs_update_ref(get_main_ref_store(the_repository),
					"am", "ORIG_HEAD", &curr_head, NULL,
					0, UPDATE_REFS_DIE_ON_ERR);
	} else {
		/* unborn branch: nothing to anchor abort-safety to */
		write_state_text(state, "abort-safety", "");
		if (!state->rebasing)
			refs_delete_ref(get_main_ref_store(the_repository),
					NULL, "ORIG_HEAD", NULL, 0);
	}

	/*
	 * NOTE: Since the "next" and "last" files determine if an am_state
	 * session is in progress, they should be written last.
	 */

	write_state_count(state, "next", state->cur);
	write_state_count(state, "last", state->last);

	strbuf_release(&sb);
}

/**
 * Increments the patch pointer, and cleans am_state for the application of the
 * next patch.
*/
static void am_next(struct am_state *state)
{
	struct object_id head;

	FREE_AND_NULL(state->author_name);
	FREE_AND_NULL(state->author_email);
	FREE_AND_NULL(state->author_date);
	FREE_AND_NULL(state->msg);
	state->msg_len = 0;

	unlink(am_path(state, "author-script"));
	unlink(am_path(state, "final-commit"));

	oidclr(&state->orig_commit, the_repository->hash_algo);
	unlink(am_path(state, "original-commit"));
	refs_delete_ref(get_main_ref_store(the_repository), NULL, "REBASE_HEAD",
			NULL, REF_NO_DEREF);

	/* refresh the point "git am --abort" may rewind to */
	if (!repo_get_oid(the_repository, "HEAD", &head))
		write_state_text(state, "abort-safety", oid_to_hex(&head));
	else
		write_state_text(state, "abort-safety", "");

	state->cur++;
	write_state_count(state, "next", state->cur);
}

/**
 * Returns the filename of the current patch email.
 */
static const char *msgnum(const struct am_state *state)
{
	/* static buffer: the returned pointer is only valid until the next call */
	static struct strbuf sb = STRBUF_INIT;

	strbuf_reset(&sb);
	strbuf_addf(&sb, "%0*d", state->prec, state->cur);

	return sb.buf;
}

/**
 * Dies with a user-friendly message on how to proceed after resolving the
 * problem. This message can be overridden with state->resolvemsg.
 */
static void NORETURN die_user_resolve(const struct am_state *state)
{
	if (state->resolvemsg) {
		advise_if_enabled(ADVICE_MERGE_CONFLICT, "%s", state->resolvemsg);
	} else {
		const char *cmdline = state->interactive ? "git am -i" : "git am";
		struct strbuf sb = STRBUF_INIT;

		strbuf_addf(&sb, _("When you have resolved this problem, run \"%s --continue\".\n"), cmdline);
		strbuf_addf(&sb, _("If you prefer to skip this patch, run \"%s --skip\" instead.\n"), cmdline);

		if (advice_enabled(ADVICE_AM_WORK_DIR) &&
		    is_empty_or_missing_file(am_path(state, "patch")) &&
		    !repo_index_has_changes(the_repository, NULL, NULL))
			strbuf_addf(&sb, _("To record the empty patch as an empty commit, run \"%s --allow-empty\".\n"), cmdline);

		strbuf_addf(&sb, _("To restore the original branch and stop patching, run \"%s --abort\"."), cmdline);

		advise_if_enabled(ADVICE_MERGE_CONFLICT, "%s", sb.buf);
		strbuf_release(&sb);
	}

	exit(128);
}

/**
 * Appends signoff to the "msg" field of the am_state.
 */
static void am_append_signoff(struct am_state *state)
{
	struct strbuf sb = STRBUF_INIT;

	/* attach/detach to edit state->msg in place without copying */
	strbuf_attach(&sb, state->msg, state->msg_len, state->msg_len);
	append_signoff(&sb, 0, 0);
	state->msg = strbuf_detach(&sb, &state->msg_len);
}

/**
 * Parses `mail` using git-mailinfo, extracting its patch and authorship info.
 * state->msg will be set to the patch message. state->author_name,
 * state->author_email and state->author_date will be set to the patch author's
 * name, email and date respectively. The patch body will be written to the
 * state directory's "patch" file.
 *
 * Returns 1 if the patch should be skipped, 0 otherwise.
*/
static int parse_mail(struct am_state *state, const char *mail)
{
	FILE *fp;
	struct strbuf sb = STRBUF_INIT;
	struct strbuf msg = STRBUF_INIT;
	struct strbuf author_name = STRBUF_INIT;
	struct strbuf author_date = STRBUF_INIT;
	struct strbuf author_email = STRBUF_INIT;
	int ret = 0;
	struct mailinfo mi;

	setup_mailinfo(&mi);

	if (state->utf8)
		mi.metainfo_charset = get_commit_output_encoding();
	else
		mi.metainfo_charset = NULL;

	/* translate the am_state options into mailinfo settings */
	switch (state->keep) {
	case KEEP_FALSE:
		break;
	case KEEP_TRUE:
		mi.keep_subject = 1;
		break;
	case KEEP_NON_PATCH:
		mi.keep_non_patch_brackets_in_subject = 1;
		break;
	default:
		BUG("invalid value for state->keep");
	}

	if (state->message_id)
		mi.add_message_id = 1;

	switch (state->scissors) {
	case SCISSORS_UNSET:
		break;
	case SCISSORS_FALSE:
		mi.use_scissors = 0;
		break;
	case SCISSORS_TRUE:
		mi.use_scissors = 1;
		break;
	default:
		BUG("invalid value for state->scissors");
	}

	switch (state->quoted_cr) {
	case quoted_cr_unset:
		break;
	case quoted_cr_nowarn:
	case quoted_cr_warn:
	case quoted_cr_strip:
		mi.quoted_cr = state->quoted_cr;
		break;
	default:
		BUG("invalid value for state->quoted_cr");
	}

	mi.input = xfopen(mail, "r");
	mi.output = xfopen(am_path(state, "info"), "w");
	if (mailinfo(&mi, am_path(state, "msg"), am_path(state, "patch")))
		die("could not parse patch");

	fclose(mi.input);
	fclose(mi.output);

	if (mi.format_flowed)
		warning(_("Patch sent with format=flowed; "
			  "space at the end of lines might be lost."));

	/* Extract message and author information */
	fp = xfopen(am_path(state, "info"), "r");
	while (!strbuf_getline_lf(&sb, fp)) {
		const char *x;

		if (skip_prefix(sb.buf, "Subject: ", &x)) {
			if (msg.len)
				strbuf_addch(&msg, '\n');
			strbuf_addstr(&msg, x);
		} else if (skip_prefix(sb.buf, "Author: ", &x))
			strbuf_addstr(&author_name, x);
		else if (skip_prefix(sb.buf, "Email: ", &x))
			strbuf_addstr(&author_email, x);
		else if (skip_prefix(sb.buf, "Date: ", &x))
			strbuf_addstr(&author_date, x);
	}
	fclose(fp);

	/* Skip pine's internal folder data */
	if (!strcmp(author_name.buf, "Mail System Internal Data")) {
		ret = 1;
		goto finish;
	}

	strbuf_addstr(&msg, "\n\n");
	strbuf_addbuf(&msg, &mi.log_message);
	strbuf_stripspace(&msg, NULL);

	assert(!state->author_name);
	state->author_name = strbuf_detach(&author_name, NULL);

	assert(!state->author_email);
	state->author_email = strbuf_detach(&author_email, NULL);

	assert(!state->author_date);
	state->author_date = strbuf_detach(&author_date, NULL);

	assert(!state->msg);
	state->msg = strbuf_detach(&msg, &state->msg_len);

finish:
	strbuf_release(&msg);
	strbuf_release(&author_date);
	strbuf_release(&author_email);
	strbuf_release(&author_name);
	strbuf_release(&sb);
	clear_mailinfo(&mi);
	return ret;
}

/**
 * Sets commit_id to the commit hash where the mail was generated from.
 * Returns 0 on success, -1 on failure.
 */
static int get_mail_commit_oid(struct object_id *commit_id, const char *mail)
{
	struct strbuf sb = STRBUF_INIT;
	FILE *fp = xfopen(mail, "r");
	const char *x;
	int ret = 0;

	/* in --rebasing mode, the mbox "From " line carries the commit hash */
	if (strbuf_getline_lf(&sb, fp) ||
	    !skip_prefix(sb.buf, "From ", &x) ||
	    get_oid_hex(x, commit_id) < 0)
		ret = -1;

	strbuf_release(&sb);
	fclose(fp);
	return ret;
}

/**
 * Sets state->msg, state->author_name, state->author_email, state->author_date
 * to the commit's respective info.
*/
static void get_commit_info(struct am_state *state, struct commit *commit)
{
	const char *buffer, *ident_line, *msg;
	size_t ident_len;
	struct ident_split id;

	buffer = repo_logmsg_reencode(the_repository, commit, NULL,
				      get_commit_output_encoding());

	ident_line = find_commit_header(buffer, "author", &ident_len);
	if (!ident_line)
		die(_("missing author line in commit %s"),
		    oid_to_hex(&commit->object.oid));
	if (split_ident_line(&id, ident_line, ident_len) < 0)
		die(_("invalid ident line: %.*s"), (int)ident_len, ident_line);

	assert(!state->author_name);
	if (id.name_begin)
		state->author_name =
			xmemdupz(id.name_begin, id.name_end - id.name_begin);
	else
		state->author_name = xstrdup("");

	assert(!state->author_email);
	if (id.mail_begin)
		state->author_email =
			xmemdupz(id.mail_begin, id.mail_end - id.mail_begin);
	else
		state->author_email = xstrdup("");

	assert(!state->author_date);
	state->author_date = xstrdup(show_ident_date(&id, DATE_MODE(NORMAL)));

	assert(!state->msg);
	msg = strstr(buffer, "\n\n");
	if (!msg)
		die(_("unable to parse commit %s"),
		    oid_to_hex(&commit->object.oid));
	/* skip the blank line separating the commit header from the body */
	state->msg = xstrdup(msg + 2);
	state->msg_len = strlen(state->msg);
	repo_unuse_commit_buffer(the_repository, commit, buffer);
}

/**
 * Writes `commit` as a patch to the state directory's "patch" file.
*/
static void write_commit_patch(const struct am_state *state, struct commit *commit)
{
	struct rev_info rev_info;
	FILE *fp;

	fp = xfopen(am_path(state, "patch"), "w");
	repo_init_revisions(the_repository, &rev_info, NULL);
	rev_info.diff = 1;
	rev_info.abbrev = 0;
	rev_info.disable_stdin = 1;
	rev_info.show_root_diff = 1;
	rev_info.diffopt.output_format = DIFF_FORMAT_PATCH;
	rev_info.no_commit_id = 1;
	/* full-index binary patches so the result can be applied losslessly */
	rev_info.diffopt.flags.binary = 1;
	rev_info.diffopt.flags.full_index = 1;
	rev_info.diffopt.use_color = 0;
	rev_info.diffopt.file = fp;
	rev_info.diffopt.close_file = 1;
	add_pending_object(&rev_info, &commit->object, "");
	diff_setup_done(&rev_info.diffopt);
	log_tree_commit(&rev_info, commit);
	release_revisions(&rev_info);
}

/**
 * Writes the diff of the index against HEAD as a patch to the state
 * directory's "patch" file.
 */
static void write_index_patch(const struct am_state *state)
{
	struct tree *tree;
	struct object_id head;
	struct rev_info rev_info;
	FILE *fp;

	/* diff against HEAD's tree, or the empty tree on an unborn branch */
	if (!repo_get_oid(the_repository, "HEAD", &head)) {
		struct commit *commit = lookup_commit_or_die(&head, "HEAD");
		tree = repo_get_commit_tree(the_repository, commit);
	} else
		tree = lookup_tree(the_repository,
				   the_repository->hash_algo->empty_tree);

	fp = xfopen(am_path(state, "patch"), "w");
	repo_init_revisions(the_repository, &rev_info, NULL);
	rev_info.diff = 1;
	rev_info.disable_stdin = 1;
	rev_info.no_commit_id = 1;
	rev_info.diffopt.output_format = DIFF_FORMAT_PATCH;
	rev_info.diffopt.use_color = 0;
	rev_info.diffopt.file = fp;
	rev_info.diffopt.close_file = 1;
	add_pending_object(&rev_info, &tree->object, "");
	diff_setup_done(&rev_info.diffopt);
	run_diff_index(&rev_info, DIFF_INDEX_CACHED);
	release_revisions(&rev_info);
}

/**
 * Like parse_mail(), but parses the mail by looking up its commit ID
 * directly. This is used in --rebasing mode to bypass git-mailinfo's munging
 * of patches.
 *
 * state->orig_commit will be set to the original commit ID.
 *
 * Will always return 0 as the patch should never be skipped.
*/
static int parse_mail_rebase(struct am_state *state, const char *mail)
{
	struct commit *commit;
	struct object_id commit_oid;

	if (get_mail_commit_oid(&commit_oid, mail) < 0)
		die(_("could not parse %s"), mail);

	commit = lookup_commit_or_die(&commit_oid, mail);

	get_commit_info(state, commit);

	write_commit_patch(state, commit);

	oidcpy(&state->orig_commit, &commit_oid);
	write_state_text(state, "original-commit", oid_to_hex(&commit_oid));
	refs_update_ref(get_main_ref_store(the_repository), "am", "REBASE_HEAD",
			&commit_oid, NULL, REF_NO_DEREF,
			UPDATE_REFS_DIE_ON_ERR);

	return 0;
}

/**
 * Applies current patch with git-apply. Returns 0 on success, -1 otherwise. If
 * `index_file` is not NULL, the patch will be applied to that index.
 */
static int run_apply(const struct am_state *state, const char *index_file)
{
	struct strvec apply_paths = STRVEC_INIT;
	struct strvec apply_opts = STRVEC_INIT;
	struct apply_state apply_state;
	int res, opts_left;
	int force_apply = 0;
	int options = 0;
	const char **apply_argv;

	if (init_apply_state(&apply_state, the_repository, NULL))
		BUG("init_apply_state() failed");

	strvec_push(&apply_opts, "apply");
	strvec_pushv(&apply_opts, state->git_apply_opts.v);

	/*
	 * Build a copy that apply_parse_options() can rearrange.
	 * apply_opts.v keeps referencing the allocated strings for
	 * strvec_clear() to release.
	 */
	DUP_ARRAY(apply_argv, apply_opts.v, apply_opts.nr);

	opts_left = apply_parse_options(apply_opts.nr, apply_argv,
					&apply_state, &force_apply, &options,
					NULL);

	if (opts_left != 0)
		die("unknown option passed through to git apply");

	if (index_file) {
		apply_state.index_file = index_file;
		apply_state.cached = 1;
	} else
		apply_state.check_index = 1;

	/*
	 * If we are allowed to fall back on 3-way merge, don't give false
	 * errors during the initial attempt.
	 */

	if (state->threeway && !index_file)
		apply_state.apply_verbosity = verbosity_silent;

	if (check_apply_state(&apply_state, force_apply))
		BUG("check_apply_state() failed");

	strvec_push(&apply_paths, am_path(state, "patch"));

	res = apply_all_patches(&apply_state, apply_paths.nr, apply_paths.v, options);

	strvec_clear(&apply_paths);
	strvec_clear(&apply_opts);
	clear_apply_state(&apply_state);
	free(apply_argv);

	if (res)
		return res;

	if (index_file) {
		/* Reload index as apply_all_patches() will have modified it. */
		discard_index(the_repository->index);
		read_index_from(the_repository->index, index_file,
				repo_get_git_dir(the_repository));
	}

	return 0;
}

/**
 * Builds an index that contains just the blobs needed for a 3way merge.
 */
static int build_fake_ancestor(const struct am_state *state, const char *index_file)
{
	struct child_process cp = CHILD_PROCESS_INIT;

	cp.git_cmd = 1;
	strvec_push(&cp.args, "apply");
	strvec_pushv(&cp.args, state->git_apply_opts.v);
	strvec_pushf(&cp.args, "--build-fake-ancestor=%s", index_file);
	strvec_push(&cp.args, am_path(state, "patch"));

	if (run_command(&cp))
		return -1;

	return 0;
}

/**
 * Attempt a threeway merge, using index_path as the temporary index.
*/
static int fall_back_threeway(const struct am_state *state, const char *index_path)
{
	struct object_id their_tree, our_tree;
	struct object_id bases[1] = { 0 };
	struct merge_options o;
	struct commit *result;
	char *their_tree_name;

	if (repo_get_oid(the_repository, "HEAD", &our_tree) < 0)
		oidcpy(&our_tree, the_hash_algo->empty_tree);

	if (build_fake_ancestor(state, index_path))
		return error("could not build fake ancestor");

	discard_index(the_repository->index);
	read_index_from(the_repository->index, index_path,
			repo_get_git_dir(the_repository));

	if (write_index_as_tree(&bases[0], the_repository->index, index_path,
				0, NULL))
		return error(_("Repository lacks necessary blobs to fall back on 3-way merge."));

	say(state, stdout, _("Using index info to reconstruct a base tree..."));

	if (!state->quiet) {
		/*
		 * List paths that needed 3-way fallback, so that the user can
		 * review them with extra care to spot mismerges.
		 */
		struct rev_info rev_info;

		repo_init_revisions(the_repository, &rev_info, NULL);
		rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS;
		rev_info.diffopt.filter |= diff_filter_bit('A');
		rev_info.diffopt.filter |= diff_filter_bit('M');
		add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
		diff_setup_done(&rev_info.diffopt);
		run_diff_index(&rev_info, DIFF_INDEX_CACHED);
		release_revisions(&rev_info);
	}

	if (run_apply(state, index_path))
		return error(_("Did you hand edit your patch?\n"
				"It does not apply to blobs recorded in its index."));

	if (write_index_as_tree(&their_tree, the_repository->index, index_path,
				0, NULL))
		return error("could not write tree");

	say(state, stdout, _("Falling back to patching base and 3-way merge..."));

	discard_index(the_repository->index);
	repo_read_index(the_repository);

	/*
	 * This is not so wrong. Depending on which base we picked, orig_tree
	 * may be wildly different from ours, but their_tree has the same set of
	 * wildly different changes in parts the patch did not touch, so
	 * recursive ends up canceling them, saying that we reverted all those
	 * changes.
	 */

	init_ui_merge_options(&o, the_repository);

	o.branch1 = "HEAD";
	/* label "their" side with the patch's first subject line */
	their_tree_name = xstrfmt("%.*s", linelen(state->msg), state->msg);
	o.branch2 = their_tree_name;
	o.detect_directory_renames = MERGE_DIRECTORY_RENAMES_NONE;

	if (state->quiet)
		o.verbosity = 0;

	if (merge_recursive_generic(&o, &our_tree, &their_tree, 1, bases, &result)) {
		repo_rerere(the_repository, state->allow_rerere_autoupdate);
		free(their_tree_name);
		return error(_("Failed to merge in the changes."));
	}

	free(their_tree_name);
	return 0;
}

/**
 * Commits the current index with state->msg as the commit message and
 * state->author_name, state->author_email and state->author_date as the author
 * information.
 */
static void do_commit(const struct am_state *state)
{
	struct object_id tree, parent, commit;
	const struct object_id *old_oid;
	struct commit_list *parents = NULL;
	const char *reflog_msg, *author, *committer = NULL;
	struct strbuf sb = STRBUF_INIT;

	if (!state->no_verify && run_hooks(the_repository, "pre-applypatch"))
		exit(1);

	if (write_index_as_tree(&tree, the_repository->index,
				repo_get_index_file(the_repository), 0, NULL))
		die(_("git write-tree failed to write a tree"));

	if (!repo_get_oid_commit(the_repository, "HEAD", &parent)) {
		old_oid = &parent;
		commit_list_insert(lookup_commit(the_repository, &parent),
				   &parents);
	} else {
		/* unborn branch: the new commit becomes the root commit */
		old_oid = NULL;
		say(state, stderr, _("applying to an empty history"));
	}

	author = fmt_ident(state->author_name, state->author_email,
			WANT_AUTHOR_IDENT,
			state->ignore_date ? NULL : state->author_date,
			IDENT_STRICT);

	if (state->committer_date_is_author_date)
		committer = fmt_ident(getenv("GIT_COMMITTER_NAME"),
				      getenv("GIT_COMMITTER_EMAIL"),
				      WANT_COMMITTER_IDENT,
				      state->ignore_date ? NULL : state->author_date,
				      IDENT_STRICT);

	if (commit_tree_extended(state->msg, state->msg_len, &tree, parents,
				 &commit, author, committer, state->sign_commit,
				 NULL))
		die(_("failed to write commit object"));

	reflog_msg = getenv("GIT_REFLOG_ACTION");
	if (!reflog_msg)
		reflog_msg = "am";

	strbuf_addf(&sb, "%s: %.*s", reflog_msg, linelen(state->msg),
			state->msg);

	refs_update_ref(get_main_ref_store(the_repository), sb.buf, "HEAD",
			&commit, old_oid, 0, UPDATE_REFS_DIE_ON_ERR);

	if (state->rebasing) {
		/* record the old/new pair for notes copying and post-rewrite */
		FILE *fp = xfopen(am_path(state, "rewritten"), "a");

		assert(!is_null_oid(&state->orig_commit));
		fprintf(fp, "%s ", oid_to_hex(&state->orig_commit));
		fprintf(fp, "%s\n", oid_to_hex(&commit));
		fclose(fp);
	}

	run_hooks(the_repository, "post-applypatch");

	free_commit_list(parents);
	strbuf_release(&sb);
}

/**
 * Validates the am_state for resuming -- the "msg" and authorship fields must
 * be filled up.
 */
static void validate_resume_state(const struct am_state *state)
{
	if (!state->msg)
		die(_("cannot resume: %s does not exist."),
			am_path(state, "final-commit"));

	if (!state->author_name || !state->author_email || !state->author_date)
		die(_("cannot resume: %s does not exist."),
			am_path(state, "author-script"));
}

/**
 * Interactively prompt the user on whether the current patch should be
 * applied.
 *
 * Returns 0 if the user chooses to apply the patch, 1 if the user chooses to
 * skip it.
 */
static int do_interactive(struct am_state *state)
{
	assert(state->msg);

	for (;;) {
		char reply[64];

		puts(_("Commit Body is:"));
		puts("--------------------------");
		printf("%s", state->msg);
		puts("--------------------------");

		/*
		 * TRANSLATORS: Make sure to include [y], [n], [e], [v] and [a]
		 * in your translation. The program will only accept English
		 * input at this point.
		 */
		printf(_("Apply? [y]es/[n]o/[e]dit/[v]iew patch/[a]ccept all: "));
		if (!fgets(reply, sizeof(reply), stdin))
			die("unable to read from stdin; aborting");

		if (*reply == 'y' || *reply == 'Y') {
			return 0;
		} else if (*reply == 'a' || *reply == 'A') {
			/* accept all: stop prompting for the rest of the run */
			state->interactive = 0;
			return 0;
		} else if (*reply == 'n' || *reply == 'N') {
			return 1;
		} else if (*reply == 'e' || *reply == 'E') {
			struct strbuf msg = STRBUF_INIT;

			if (!launch_editor(am_path(state, "final-commit"), &msg, NULL)) {
				free(state->msg);
				state->msg = strbuf_detach(&msg, &state->msg_len);
			}
			strbuf_release(&msg);
		} else if (*reply == 'v' || *reply == 'V') {
			const char *pager = git_pager(1);
			struct child_process cp = CHILD_PROCESS_INIT;

			if (!pager)
				pager = "cat";
			prepare_pager_args(&cp, pager);
			strvec_push(&cp.args, am_path(state, "patch"));
			run_command(&cp);
		}
		/* any other reply: re-prompt */
	}
}

/**
 * Applies all queued mail.
 *
 * If `resume` is true, we are "resuming". The "msg" and authorship fields, as
 * well as the state directory's "patch" file is used as-is for applying the
 * patch and committing it.
*/ static void am_run(struct am_state *state, int resume) { struct strbuf sb = STRBUF_INIT; unlink(am_path(state, "dirtyindex")); if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0, NULL, NULL, NULL) < 0) die(_("unable to write index file")); if (repo_index_has_changes(the_repository, NULL, &sb)) { write_state_bool(state, "dirtyindex", 1); die(_("Dirty index: cannot apply patches (dirty: %s)"), sb.buf); } strbuf_release(&sb); while (state->cur <= state->last) { const char *mail = am_path(state, msgnum(state)); int apply_status; int to_keep; reset_ident_date(); if (!file_exists(mail)) goto next; if (resume) { validate_resume_state(state); } else { int skip; if (state->rebasing) skip = parse_mail_rebase(state, mail); else skip = parse_mail(state, mail); if (skip) goto next; /* mail should be skipped */ if (state->signoff) am_append_signoff(state); write_author_script(state); write_commit_msg(state); } if (state->interactive && do_interactive(state)) goto next; to_keep = 0; if (is_empty_or_missing_file(am_path(state, "patch"))) { switch (state->empty_type) { case DROP_EMPTY_COMMIT: say(state, stdout, _("Skipping: %.*s"), linelen(state->msg), state->msg); goto next; break; case KEEP_EMPTY_COMMIT: to_keep = 1; say(state, stdout, _("Creating an empty commit: %.*s"), linelen(state->msg), state->msg); break; case STOP_ON_EMPTY_COMMIT: printf_ln(_("Patch is empty.")); die_user_resolve(state); break; } } if (run_applypatch_msg_hook(state)) exit(1); if (to_keep) goto commit; say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg); apply_status = run_apply(state, NULL); if (apply_status && state->threeway) { struct strbuf sb = STRBUF_INIT; strbuf_addstr(&sb, am_path(state, "patch-merge-index")); apply_status = fall_back_threeway(state, sb.buf); strbuf_release(&sb); /* * Applying the patch to an earlier tree and merging * the result may have produced the same tree as ours. 
*/ if (!apply_status && !repo_index_has_changes(the_repository, NULL, NULL)) { say(state, stdout, _("No changes -- Patch already applied.")); goto next; } } if (apply_status) { printf_ln(_("Patch failed at %s %.*s"), msgnum(state), linelen(state->msg), state->msg); if (advice_enabled(ADVICE_AM_WORK_DIR)) advise(_("Use 'git am --show-current-patch=diff' to see the failed patch")); die_user_resolve(state); } commit: do_commit(state); next: am_next(state); if (resume) am_load(state); resume = 0; } if (!is_empty_or_missing_file(am_path(state, "rewritten"))) { assert(state->rebasing); copy_notes_for_rebase(state); run_post_rewrite_hook(state); } /* * In rebasing mode, it's up to the caller to take care of * housekeeping. */ if (!state->rebasing) { am_destroy(state); run_auto_maintenance(state->quiet); } } /** * Resume the current am session after patch application failure. The user did * all the hard work, and we do not have to do any patch application. Just * trust and commit what the user has in the index and working tree. If `allow_empty` * is true, commit as an empty commit when index has not changed and lacking a patch. 
*/ static void am_resolve(struct am_state *state, int allow_empty) { validate_resume_state(state); say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg); if (!repo_index_has_changes(the_repository, NULL, NULL)) { if (allow_empty && is_empty_or_missing_file(am_path(state, "patch"))) { printf_ln(_("No changes - recorded it as an empty commit.")); } else { printf_ln(_("No changes - did you forget to use 'git add'?\n" "If there is nothing left to stage, chances are that something else\n" "already introduced the same changes; you might want to skip this patch.")); die_user_resolve(state); } } if (unmerged_index(the_repository->index)) { printf_ln(_("You still have unmerged paths in your index.\n" "You should 'git add' each file with resolved conflicts to mark them as such.\n" "You might run `git rm` on a file to accept \"deleted by them\" for it.")); die_user_resolve(state); } if (state->interactive) { write_index_patch(state); if (do_interactive(state)) goto next; } repo_rerere(the_repository, 0); do_commit(state); next: am_next(state); am_load(state); am_run(state, 0); } /** * Performs a checkout fast-forward from `head` to `remote`. If `reset` is * true, any unmerged entries will be discarded. Returns 0 on success, -1 on * failure. */ static int fast_forward_to(struct tree *head, struct tree *remote, int reset) { struct lock_file lock_file = LOCK_INIT; struct unpack_trees_options opts; struct tree_desc t[2]; if (parse_tree(head) || parse_tree(remote)) return -1; repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR); refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL); memset(&opts, 0, sizeof(opts)); opts.head_idx = 1; opts.src_index = the_repository->index; opts.dst_index = the_repository->index; opts.update = 1; opts.merge = 1; opts.reset = reset ? 
UNPACK_RESET_PROTECT_UNTRACKED : 0; opts.preserve_ignored = 0; /* FIXME: !overwrite_ignore */ opts.fn = twoway_merge; init_tree_desc(&t[0], &head->object.oid, head->buffer, head->size); init_tree_desc(&t[1], &remote->object.oid, remote->buffer, remote->size); if (unpack_trees(2, t, &opts)) { rollback_lock_file(&lock_file); return -1; } if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK)) die(_("unable to write new index file")); return 0; } /** * Merges a tree into the index. The index's stat info will take precedence * over the merged tree's. Returns 0 on success, -1 on failure. */ static int merge_tree(struct tree *tree) { struct lock_file lock_file = LOCK_INIT; struct unpack_trees_options opts; struct tree_desc t[1]; if (parse_tree(tree)) return -1; repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR); memset(&opts, 0, sizeof(opts)); opts.head_idx = 1; opts.src_index = the_repository->index; opts.dst_index = the_repository->index; opts.merge = 1; opts.fn = oneway_merge; init_tree_desc(&t[0], &tree->object.oid, tree->buffer, tree->size); if (unpack_trees(1, t, &opts)) { rollback_lock_file(&lock_file); return -1; } if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK)) die(_("unable to write new index file")); return 0; } /** * Clean the index without touching entries that are not modified between * `head` and `remote`. 
*/ static int clean_index(const struct object_id *head, const struct object_id *remote) { struct tree *head_tree, *remote_tree, *index_tree; struct object_id index; head_tree = parse_tree_indirect(head); if (!head_tree) return error(_("Could not parse object '%s'."), oid_to_hex(head)); remote_tree = parse_tree_indirect(remote); if (!remote_tree) return error(_("Could not parse object '%s'."), oid_to_hex(remote)); repo_read_index_unmerged(the_repository); if (fast_forward_to(head_tree, head_tree, 1)) return -1; if (write_index_as_tree(&index, the_repository->index, repo_get_index_file(the_repository), 0, NULL)) return -1; index_tree = parse_tree_indirect(&index); if (!index_tree) return error(_("Could not parse object '%s'."), oid_to_hex(&index)); if (fast_forward_to(index_tree, remote_tree, 0)) return -1; if (merge_tree(remote_tree)) return -1; remove_branch_state(the_repository, 0); return 0; } /** * Resets rerere's merge resolution metadata. */ static void am_rerere_clear(void) { struct string_list merge_rr = STRING_LIST_INIT_DUP; rerere_clear(the_repository, &merge_rr); string_list_clear(&merge_rr, 1); } /** * Resume the current am session by skipping the current patch. */ static void am_skip(struct am_state *state) { struct object_id head; am_rerere_clear(); if (repo_get_oid(the_repository, "HEAD", &head)) oidcpy(&head, the_hash_algo->empty_tree); if (clean_index(&head, &head)) die(_("failed to clean index")); if (state->rebasing) { FILE *fp = xfopen(am_path(state, "rewritten"), "a"); assert(!is_null_oid(&state->orig_commit)); fprintf(fp, "%s ", oid_to_hex(&state->orig_commit)); fprintf(fp, "%s\n", oid_to_hex(&head)); fclose(fp); } am_next(state); am_load(state); am_run(state, 0); } /** * Returns true if it is safe to reset HEAD to the ORIG_HEAD, false otherwise. * * It is not safe to reset HEAD when: * 1. git-am previously failed because the index was dirty. * 2. HEAD has moved since git-am previously failed. 
*/ static int safe_to_abort(const struct am_state *state) { struct strbuf sb = STRBUF_INIT; struct object_id abort_safety, head; if (file_exists(am_path(state, "dirtyindex"))) return 0; if (read_state_file(&sb, state, "abort-safety", 1) > 0) { if (get_oid_hex(sb.buf, &abort_safety)) die(_("could not parse %s"), am_path(state, "abort-safety")); } else oidclr(&abort_safety, the_repository->hash_algo); strbuf_release(&sb); if (repo_get_oid(the_repository, "HEAD", &head)) oidclr(&head, the_repository->hash_algo); if (oideq(&head, &abort_safety)) return 1; warning(_("You seem to have moved HEAD since the last 'am' failure.\n" "Not rewinding to ORIG_HEAD")); return 0; } /** * Aborts the current am session if it is safe to do so. */ static void am_abort(struct am_state *state) { struct object_id curr_head, orig_head; int has_curr_head, has_orig_head; char *curr_branch; if (!safe_to_abort(state)) { am_destroy(state); return; } am_rerere_clear(); curr_branch = refs_resolve_refdup(get_main_ref_store(the_repository), "HEAD", 0, &curr_head, NULL); has_curr_head = curr_branch && !is_null_oid(&curr_head); if (!has_curr_head) oidcpy(&curr_head, the_hash_algo->empty_tree); has_orig_head = !repo_get_oid(the_repository, "ORIG_HEAD", &orig_head); if (!has_orig_head) oidcpy(&orig_head, the_hash_algo->empty_tree); if (clean_index(&curr_head, &orig_head)) die(_("failed to clean index")); if (has_orig_head) refs_update_ref(get_main_ref_store(the_repository), "am --abort", "HEAD", &orig_head, has_curr_head ? 
&curr_head : NULL, 0, UPDATE_REFS_DIE_ON_ERR); else if (curr_branch) refs_delete_ref(get_main_ref_store(the_repository), NULL, curr_branch, NULL, REF_NO_DEREF); free(curr_branch); am_destroy(state); } static int show_patch(struct am_state *state, enum resume_type resume_mode) { struct strbuf sb = STRBUF_INIT; const char *patch_path; int len; if (!is_null_oid(&state->orig_commit)) { struct child_process cmd = CHILD_PROCESS_INIT; strvec_pushl(&cmd.args, "show", oid_to_hex(&state->orig_commit), "--", NULL); cmd.git_cmd = 1; return run_command(&cmd); } switch (resume_mode) { case RESUME_SHOW_PATCH_RAW: patch_path = am_path(state, msgnum(state)); break; case RESUME_SHOW_PATCH_DIFF: patch_path = am_path(state, "patch"); break; default: BUG("invalid mode for --show-current-patch"); } len = strbuf_read_file(&sb, patch_path, 0); if (len < 0) die_errno(_("failed to read '%s'"), patch_path); setup_pager(); write_in_full(1, sb.buf, sb.len); strbuf_release(&sb); return 0; } /** * parse_options() callback that validates and sets opt->value to the * PATCH_FORMAT_* enum value corresponding to `arg`. 
*/ static int parse_opt_patchformat(const struct option *opt, const char *arg, int unset) { int *opt_value = opt->value; if (unset) *opt_value = PATCH_FORMAT_UNKNOWN; else if (!strcmp(arg, "mbox")) *opt_value = PATCH_FORMAT_MBOX; else if (!strcmp(arg, "stgit")) *opt_value = PATCH_FORMAT_STGIT; else if (!strcmp(arg, "stgit-series")) *opt_value = PATCH_FORMAT_STGIT_SERIES; else if (!strcmp(arg, "hg")) *opt_value = PATCH_FORMAT_HG; else if (!strcmp(arg, "mboxrd")) *opt_value = PATCH_FORMAT_MBOXRD; /* * Please update $__git_patchformat in git-completion.bash * when you add new options */ else return error(_("invalid value for '%s': '%s'"), "--patch-format", arg); return 0; } static int parse_opt_show_current_patch(const struct option *opt, const char *arg, int unset) { int *opt_value = opt->value; BUG_ON_OPT_NEG(unset); if (!arg) *opt_value = opt->defval; else if (!strcmp(arg, "raw")) *opt_value = RESUME_SHOW_PATCH_RAW; else if (!strcmp(arg, "diff")) *opt_value = RESUME_SHOW_PATCH_DIFF; /* * Please update $__git_showcurrentpatch in git-completion.bash * when you add new options */ else return error(_("invalid value for '%s': '%s'"), "--show-current-patch", arg); return 0; } int cmd_am(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct am_state state; int binary = -1; int keep_cr = -1; int patch_format = PATCH_FORMAT_UNKNOWN; enum resume_type resume_mode = RESUME_FALSE; int in_progress; int ret = 0; const char * const usage[] = { N_("git am [] [( | )...]"), N_("git am [] (--continue | --skip | --abort)"), NULL }; struct option options[] = { OPT_BOOL('i', "interactive", &state.interactive, N_("run interactively")), OPT_BOOL('n', "no-verify", &state.no_verify, N_("bypass pre-applypatch and applypatch-msg hooks")), OPT_HIDDEN_BOOL('b', "binary", &binary, N_("historical option -- no-op")), OPT_BOOL('3', "3way", &state.threeway, N_("allow fall back on 3way merging if needed")), OPT__QUIET(&state.quiet, N_("be quiet")), OPT_SET_INT('s', 
"signoff", &state.signoff, N_("add a Signed-off-by trailer to the commit message"), SIGNOFF_EXPLICIT), OPT_BOOL('u', "utf8", &state.utf8, N_("recode into utf8 (default)")), OPT_SET_INT('k', "keep", &state.keep, N_("pass -k flag to git-mailinfo"), KEEP_TRUE), OPT_SET_INT(0, "keep-non-patch", &state.keep, N_("pass -b flag to git-mailinfo"), KEEP_NON_PATCH), OPT_BOOL('m', "message-id", &state.message_id, N_("pass -m flag to git-mailinfo")), OPT_SET_INT(0, "keep-cr", &keep_cr, N_("pass --keep-cr flag to git-mailsplit for mbox format"), 1), OPT_BOOL('c', "scissors", &state.scissors, N_("strip everything before a scissors line")), OPT_CALLBACK_F(0, "quoted-cr", &state.quoted_cr, N_("action"), N_("pass it through git-mailinfo"), PARSE_OPT_NONEG, am_option_parse_quoted_cr), OPT_PASSTHRU_ARGV(0, "whitespace", &state.git_apply_opts, N_("action"), N_("pass it through git-apply"), 0), OPT_PASSTHRU_ARGV(0, "ignore-space-change", &state.git_apply_opts, NULL, N_("pass it through git-apply"), PARSE_OPT_NOARG), OPT_PASSTHRU_ARGV(0, "ignore-whitespace", &state.git_apply_opts, NULL, N_("pass it through git-apply"), PARSE_OPT_NOARG), OPT_PASSTHRU_ARGV(0, "directory", &state.git_apply_opts, N_("root"), N_("pass it through git-apply"), 0), OPT_PASSTHRU_ARGV(0, "exclude", &state.git_apply_opts, N_("path"), N_("pass it through git-apply"), 0), OPT_PASSTHRU_ARGV(0, "include", &state.git_apply_opts, N_("path"), N_("pass it through git-apply"), 0), OPT_PASSTHRU_ARGV('C', NULL, &state.git_apply_opts, N_("n"), N_("pass it through git-apply"), 0), OPT_PASSTHRU_ARGV('p', NULL, &state.git_apply_opts, N_("num"), N_("pass it through git-apply"), 0), OPT_CALLBACK(0, "patch-format", &patch_format, N_("format"), N_("format the patch(es) are in"), parse_opt_patchformat), OPT_PASSTHRU_ARGV(0, "reject", &state.git_apply_opts, NULL, N_("pass it through git-apply"), PARSE_OPT_NOARG), OPT_STRING(0, "resolvemsg", &state.resolvemsg, NULL, N_("override error message when patch failure occurs")), OPT_CMDMODE(0, 
"continue", &resume_mode, N_("continue applying patches after resolving a conflict"), RESUME_RESOLVED), OPT_CMDMODE('r', "resolved", &resume_mode, N_("synonyms for --continue"), RESUME_RESOLVED), OPT_CMDMODE(0, "skip", &resume_mode, N_("skip the current patch"), RESUME_SKIP), OPT_CMDMODE(0, "abort", &resume_mode, N_("restore the original branch and abort the patching operation"), RESUME_ABORT), OPT_CMDMODE(0, "quit", &resume_mode, N_("abort the patching operation but keep HEAD where it is"), RESUME_QUIT), { OPTION_CALLBACK, 0, "show-current-patch", &resume_mode, "(diff|raw)", N_("show the patch being applied"), PARSE_OPT_CMDMODE | PARSE_OPT_OPTARG | PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP, parse_opt_show_current_patch, RESUME_SHOW_PATCH_RAW }, OPT_CMDMODE(0, "retry", &resume_mode, N_("try to apply current patch again"), RESUME_APPLY), OPT_CMDMODE(0, "allow-empty", &resume_mode, N_("record the empty patch as an empty commit"), RESUME_ALLOW_EMPTY), OPT_BOOL(0, "committer-date-is-author-date", &state.committer_date_is_author_date, N_("lie about committer date")), OPT_BOOL(0, "ignore-date", &state.ignore_date, N_("use current timestamp for author date")), OPT_RERERE_AUTOUPDATE(&state.allow_rerere_autoupdate), { OPTION_STRING, 'S', "gpg-sign", &state.sign_commit, N_("key-id"), N_("GPG-sign commits"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" }, OPT_CALLBACK_F(0, "empty", &state.empty_type, "(stop|drop|keep)", N_("how to handle empty patches"), PARSE_OPT_NONEG, am_option_parse_empty), OPT_HIDDEN_BOOL(0, "rebasing", &state.rebasing, N_("(internal use for git-rebase)")), OPT_END() }; if (argc == 2 && !strcmp(argv[1], "-h")) usage_with_options(usage, options); git_config(git_default_config, NULL); am_state_init(&state); in_progress = am_in_progress(&state); if (in_progress) am_load(&state); argc = parse_options(argc, argv, prefix, options, usage, 0); if (binary >= 0) fprintf_ln(stderr, _("The -b/--binary option has been a no-op for long time, and\n" "it will be removed. 
Please do not use it anymore.")); /* Ensure a valid committer ident can be constructed */ git_committer_info(IDENT_STRICT); if (repo_read_index_preload(the_repository, NULL, 0) < 0) die(_("failed to read the index")); if (in_progress) { /* * Catch user error to feed us patches when there is a session * in progress: * * 1. mbox path(s) are provided on the command-line. * 2. stdin is not a tty: the user is trying to feed us a patch * from standard input. This is somewhat unreliable -- stdin * could be /dev/null for example and the caller did not * intend to feed us a patch but wanted to continue * unattended. */ if (argc || (resume_mode == RESUME_FALSE && !isatty(0))) die(_("previous rebase directory %s still exists but mbox given."), state.dir); if (resume_mode == RESUME_FALSE) resume_mode = RESUME_APPLY; if (state.signoff == SIGNOFF_EXPLICIT) am_append_signoff(&state); } else { struct strvec paths = STRVEC_INIT; int i; /* * Handle stray state directory in the independent-run case. In * the --rebasing case, it is up to the caller to take care of * stray directories. */ if (file_exists(state.dir) && !state.rebasing) { if (resume_mode == RESUME_ABORT || resume_mode == RESUME_QUIT) { am_destroy(&state); am_state_release(&state); return 0; } die(_("Stray %s directory found.\n" "Use \"git am --abort\" to remove it."), state.dir); } if (resume_mode) die(_("Resolve operation not in progress, we are not resuming.")); for (i = 0; i < argc; i++) { if (is_absolute_path(argv[i]) || !prefix) strvec_push(&paths, argv[i]); else strvec_push(&paths, mkpath("%s/%s", prefix, argv[i])); } if (state.interactive && !paths.nr) die(_("interactive mode requires patches on the command line")); am_setup(&state, patch_format, paths.v, keep_cr); strvec_clear(&paths); } switch (resume_mode) { case RESUME_FALSE: am_run(&state, 0); break; case RESUME_APPLY: am_run(&state, 1); break; case RESUME_RESOLVED: case RESUME_ALLOW_EMPTY: am_resolve(&state, resume_mode == RESUME_ALLOW_EMPTY ? 
1 : 0); break; case RESUME_SKIP: am_skip(&state); break; case RESUME_ABORT: am_abort(&state); break; case RESUME_QUIT: am_rerere_clear(); am_destroy(&state); break; case RESUME_SHOW_PATCH_RAW: case RESUME_SHOW_PATCH_DIFF: ret = show_patch(&state, resume_mode); break; default: BUG("invalid resume value"); } am_state_release(&state); return ret; } git-cinnabar-0.7.0/git-core/builtin/annotate.c000064400000000000000000000013461046102023000173260ustar 00000000000000/* * "git annotate" builtin alias * * Copyright (C) 2006 Ryan Anderson */ #include "git-compat-util.h" #include "builtin.h" #include "strvec.h" int cmd_annotate(int argc, const char **argv, const char *prefix, struct repository *repo) { struct strvec args = STRVEC_INIT; const char **args_copy; int ret; strvec_pushl(&args, "annotate", "-c", NULL); for (int i = 1; i < argc; i++) strvec_push(&args, argv[i]); /* * `cmd_blame()` ends up modifying the array, which causes memory leaks * if we didn't copy the array here. */ CALLOC_ARRAY(args_copy, args.nr + 1); COPY_ARRAY(args_copy, args.v, args.nr); ret = cmd_blame(args.nr, args_copy, prefix, repo); strvec_clear(&args); free(args_copy); return ret; } git-cinnabar-0.7.0/git-core/builtin/apply.c000064400000000000000000000020071046102023000166350ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "gettext.h" #include "hash.h" #include "apply.h" static const char * const apply_usage[] = { N_("git apply [] [...]"), NULL }; int cmd_apply(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { int force_apply = 0; int options = 0; int ret; struct apply_state state; if (init_apply_state(&state, the_repository, prefix)) exit(128); /* * We could to redo the "apply.c" machinery to make this * arbitrary fallback unnecessary, but it is dubious that it * is worth the effort. * cf. 
https://lore.kernel.org/git/xmqqcypfcmn4.fsf@gitster.g/ */ if (!the_hash_algo) repo_set_hash_algo(the_repository, GIT_HASH_SHA1); argc = apply_parse_options(argc, argv, &state, &force_apply, &options, apply_usage); if (check_apply_state(&state, force_apply)) exit(128); ret = apply_all_patches(&state, argc, argv, options); clear_apply_state(&state); return ret; } git-cinnabar-0.7.0/git-core/builtin/archive.c000064400000000000000000000060431046102023000171350ustar 00000000000000/* * Copyright (c) 2006 Franck Bui-Huu * Copyright (c) 2006 Rene Scharfe */ #include "builtin.h" #include "archive.h" #include "gettext.h" #include "transport.h" #include "parse-options.h" #include "pkt-line.h" static void create_output_file(const char *output_file) { int output_fd = xopen(output_file, O_CREAT | O_WRONLY | O_TRUNC, 0666); if (output_fd != 1) { if (dup2(output_fd, 1) < 0) die_errno(_("could not redirect output")); else close(output_fd); } } static int run_remote_archiver(int argc, const char **argv, const char *remote, const char *exec, const char *name_hint) { int fd[2], i, rv; struct transport *transport; struct remote *_remote; struct packet_reader reader; _remote = remote_get(remote); transport = transport_get(_remote, _remote->url.v[0]); transport_connect(transport, "git-upload-archive", exec, fd); /* * Inject a fake --format field at the beginning of the * arguments, with the format inferred from our output * filename. This way explicit --format options can override * it. 
*/ if (name_hint) { const char *format = archive_format_from_filename(name_hint); if (format) packet_write_fmt(fd[1], "argument --format=%s\n", format); } for (i = 1; i < argc; i++) packet_write_fmt(fd[1], "argument %s\n", argv[i]); packet_flush(fd[1]); packet_reader_init(&reader, fd[0], NULL, 0, PACKET_READ_CHOMP_NEWLINE | PACKET_READ_DIE_ON_ERR_PACKET); if (packet_reader_read(&reader) != PACKET_READ_NORMAL) die(_("git archive: expected ACK/NAK, got a flush packet")); if (strcmp(reader.line, "ACK")) { if (starts_with(reader.line, "NACK ")) die(_("git archive: NACK %s"), reader.line + 5); die(_("git archive: protocol error")); } if (packet_reader_read(&reader) != PACKET_READ_FLUSH) die(_("git archive: expected a flush")); /* Now, start reading from fd[0] and spit it out to stdout */ rv = recv_sideband("archive", fd[0], 1); rv |= transport_disconnect(transport); return !!rv; } #define PARSE_OPT_KEEP_ALL ( PARSE_OPT_KEEP_DASHDASH | \ PARSE_OPT_KEEP_ARGV0 | \ PARSE_OPT_KEEP_UNKNOWN_OPT | \ PARSE_OPT_NO_INTERNAL_HELP ) int cmd_archive(int argc, const char **argv, const char *prefix, struct repository *repo) { const char *exec = "git-upload-archive"; char *output = NULL; const char *remote = NULL; struct option local_opts[] = { OPT_FILENAME('o', "output", &output, N_("write the archive to this file")), OPT_STRING(0, "remote", &remote, N_("repo"), N_("retrieve the archive from remote repository ")), OPT_STRING(0, "exec", &exec, N_("command"), N_("path to the remote git-upload-archive command")), OPT_END() }; int ret; argc = parse_options(argc, argv, prefix, local_opts, NULL, PARSE_OPT_KEEP_ALL); init_archivers(); if (output) create_output_file(output); if (remote) { ret = run_remote_archiver(argc, argv, remote, exec, output); goto out; } setvbuf(stderr, NULL, _IOLBF, BUFSIZ); ret = write_archive(argc, argv, prefix, repo, output, 0); out: free(output); return ret; } git-cinnabar-0.7.0/git-core/builtin/backfill.c000064400000000000000000000070441046102023000172650ustar 
00000000000000#define USE_THE_REPOSITORY_VARIABLE /* for core_apply_sparse_checkout */ #include "builtin.h" #include "git-compat-util.h" #include "config.h" #include "parse-options.h" #include "repository.h" #include "commit.h" #include "dir.h" #include "environment.h" #include "hex.h" #include "tree.h" #include "tree-walk.h" #include "object.h" #include "object-store-ll.h" #include "oid-array.h" #include "oidset.h" #include "promisor-remote.h" #include "strmap.h" #include "string-list.h" #include "revision.h" #include "trace2.h" #include "progress.h" #include "packfile.h" #include "path-walk.h" static const char * const builtin_backfill_usage[] = { N_("(EXPERIMENTAL) git backfill [--batch-size=] [--[no-]sparse]"), NULL }; struct backfill_context { struct repository *repo; struct oid_array current_batch; size_t batch_size; int sparse; }; static void clear_backfill_context(struct backfill_context *ctx) { oid_array_clear(&ctx->current_batch); } static void download_batch(struct backfill_context *ctx) { promisor_remote_get_direct(ctx->repo, ctx->current_batch.oid, ctx->current_batch.nr); oid_array_clear(&ctx->current_batch); /* * We likely have a new packfile. Add it to the packed list to * avoid possible duplicate downloads of the same objects. 
*/ reprepare_packed_git(ctx->repo); } static int fill_missing_blobs(const char *path UNUSED, struct oid_array *list, enum object_type type, void *data) { struct backfill_context *ctx = data; if (type != OBJ_BLOB) return 0; for (size_t i = 0; i < list->nr; i++) { off_t size = 0; struct object_info info = OBJECT_INFO_INIT; info.disk_sizep = &size; if (oid_object_info_extended(ctx->repo, &list->oid[i], &info, OBJECT_INFO_FOR_PREFETCH) || !size) oid_array_append(&ctx->current_batch, &list->oid[i]); } if (ctx->current_batch.nr >= ctx->batch_size) download_batch(ctx); return 0; } static int do_backfill(struct backfill_context *ctx) { struct rev_info revs; struct path_walk_info info = PATH_WALK_INFO_INIT; int ret; if (ctx->sparse) { CALLOC_ARRAY(info.pl, 1); if (get_sparse_checkout_patterns(info.pl)) { clear_pattern_list(info.pl); free(info.pl); return error(_("problem loading sparse-checkout")); } } repo_init_revisions(ctx->repo, &revs, ""); handle_revision_arg("HEAD", &revs, 0, 0); info.blobs = 1; info.tags = info.commits = info.trees = 0; info.revs = &revs; info.path_fn = fill_missing_blobs; info.path_fn_data = ctx; ret = walk_objects_by_path(&info); /* Download the objects that did not fill a batch. 
*/ if (!ret) download_batch(ctx); clear_backfill_context(ctx); release_revisions(&revs); if (info.pl) { clear_pattern_list(info.pl); free(info.pl); } return ret; } int cmd_backfill(int argc, const char **argv, const char *prefix, struct repository *repo) { struct backfill_context ctx = { .repo = repo, .current_batch = OID_ARRAY_INIT, .batch_size = 50000, .sparse = 0, }; struct option options[] = { OPT_INTEGER(0, "batch-size", &ctx.batch_size, N_("Minimun number of objects to request at a time")), OPT_BOOL(0, "sparse", &ctx.sparse, N_("Restrict the missing objects to the current sparse-checkout")), OPT_END(), }; if (argc == 2 && !strcmp(argv[1], "-h")) usage_with_options(builtin_backfill_usage, options); argc = parse_options(argc, argv, prefix, options, builtin_backfill_usage, 0); repo_config(repo, git_default_config, NULL); if (ctx.sparse < 0) ctx.sparse = core_apply_sparse_checkout; return do_backfill(&ctx); } git-cinnabar-0.7.0/git-core/builtin/bisect.c000064400000000000000000001123121046102023000167620ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "builtin.h" #include "copy.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "object-name.h" #include "parse-options.h" #include "bisect.h" #include "refs.h" #include "strvec.h" #include "run-command.h" #include "oid-array.h" #include "path.h" #include "prompt.h" #include "quote.h" #include "revision.h" static GIT_PATH_FUNC(git_path_bisect_terms, "BISECT_TERMS") static GIT_PATH_FUNC(git_path_bisect_ancestors_ok, "BISECT_ANCESTORS_OK") static GIT_PATH_FUNC(git_path_bisect_start, "BISECT_START") static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG") static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES") static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT") static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN") #define BUILTIN_GIT_BISECT_START_USAGE \ N_("git bisect start [--term-(new|bad)= --term-(old|good)=]" \ " 
[--no-checkout] [--first-parent] [ [...]] [--]" \ " [...]") #define BUILTIN_GIT_BISECT_STATE_USAGE \ N_("git bisect (good|bad) [...]") #define BUILTIN_GIT_BISECT_TERMS_USAGE \ "git bisect terms [--term-good | --term-bad]" #define BUILTIN_GIT_BISECT_SKIP_USAGE \ N_("git bisect skip [(|)...]") #define BUILTIN_GIT_BISECT_NEXT_USAGE \ "git bisect next" #define BUILTIN_GIT_BISECT_RESET_USAGE \ N_("git bisect reset []") #define BUILTIN_GIT_BISECT_VISUALIZE_USAGE \ "git bisect visualize" #define BUILTIN_GIT_BISECT_REPLAY_USAGE \ N_("git bisect replay ") #define BUILTIN_GIT_BISECT_LOG_USAGE \ "git bisect log" #define BUILTIN_GIT_BISECT_RUN_USAGE \ N_("git bisect run [...]") static const char * const git_bisect_usage[] = { BUILTIN_GIT_BISECT_START_USAGE, BUILTIN_GIT_BISECT_STATE_USAGE, BUILTIN_GIT_BISECT_TERMS_USAGE, BUILTIN_GIT_BISECT_SKIP_USAGE, BUILTIN_GIT_BISECT_NEXT_USAGE, BUILTIN_GIT_BISECT_RESET_USAGE, BUILTIN_GIT_BISECT_VISUALIZE_USAGE, BUILTIN_GIT_BISECT_REPLAY_USAGE, BUILTIN_GIT_BISECT_LOG_USAGE, BUILTIN_GIT_BISECT_RUN_USAGE, NULL }; struct add_bisect_ref_data { struct rev_info *revs; unsigned int object_flags; }; struct bisect_terms { char *term_good; char *term_bad; }; static void free_terms(struct bisect_terms *terms) { FREE_AND_NULL(terms->term_good); FREE_AND_NULL(terms->term_bad); } static void set_terms(struct bisect_terms *terms, const char *bad, const char *good) { free((void *)terms->term_good); terms->term_good = xstrdup(good); free((void *)terms->term_bad); terms->term_bad = xstrdup(bad); } static const char vocab_bad[] = "bad|new"; static const char vocab_good[] = "good|old"; static int bisect_autostart(struct bisect_terms *terms); /* * Check whether the string `term` belongs to the set of strings * included in the variable arguments. */ LAST_ARG_MUST_BE_NULL static int one_of(const char *term, ...) 
{ int res = 0; va_list matches; const char *match; va_start(matches, term); while (!res && (match = va_arg(matches, const char *))) res = !strcmp(term, match); va_end(matches); return res; } /* * return code BISECT_INTERNAL_SUCCESS_MERGE_BASE * and BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND are codes * that indicate special success. */ static int is_bisect_success(enum bisect_error res) { return !res || res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND || res == BISECT_INTERNAL_SUCCESS_MERGE_BASE; } static int write_in_file(const char *path, const char *mode, const char *format, va_list args) { FILE *fp = NULL; int res = 0; if (strcmp(mode, "w") && strcmp(mode, "a")) BUG("write-in-file does not support '%s' mode", mode); fp = fopen(path, mode); if (!fp) return error_errno(_("cannot open file '%s' in mode '%s'"), path, mode); res = vfprintf(fp, format, args); if (res < 0) { int saved_errno = errno; fclose(fp); errno = saved_errno; return error_errno(_("could not write to file '%s'"), path); } return fclose(fp); } __attribute__((format (printf, 2, 3))) static int write_to_file(const char *path, const char *format, ...) { int res; va_list args; va_start(args, format); res = write_in_file(path, "w", format, args); va_end(args); return res; } __attribute__((format (printf, 2, 3))) static int append_to_file(const char *path, const char *format, ...) 
{ int res; va_list args; va_start(args, format); res = write_in_file(path, "a", format, args); va_end(args); return res; } static int print_file_to_stdout(const char *path) { int fd = open(path, O_RDONLY); int ret = 0; if (fd < 0) return error_errno(_("cannot open file '%s' for reading"), path); if (copy_fd(fd, 1) < 0) ret = error_errno(_("failed to read '%s'"), path); close(fd); return ret; } static int check_term_format(const char *term, const char *orig_term) { int res; char *new_term = xstrfmt("refs/bisect/%s", term); res = check_refname_format(new_term, 0); free(new_term); if (res) return error(_("'%s' is not a valid term"), term); if (one_of(term, "help", "start", "skip", "next", "reset", "visualize", "view", "replay", "log", "run", "terms", NULL)) return error(_("can't use the builtin command '%s' as a term"), term); /* * In theory, nothing prevents swapping completely good and bad, * but this situation could be confusing and hasn't been tested * enough. Forbid it for now. */ if ((strcmp(orig_term, "bad") && one_of(term, "bad", "new", NULL)) || (strcmp(orig_term, "good") && one_of(term, "good", "old", NULL))) return error(_("can't change the meaning of the term '%s'"), term); return 0; } static int write_terms(const char *bad, const char *good) { int res; if (!strcmp(bad, good)) return error(_("please use two different terms")); if (check_term_format(bad, "bad") || check_term_format(good, "good")) return -1; res = write_to_file(git_path_bisect_terms(), "%s\n%s\n", bad, good); return res; } static int bisect_reset(const char *commit) { struct strbuf branch = STRBUF_INIT; if (!commit) { if (!strbuf_read_file(&branch, git_path_bisect_start(), 0)) printf(_("We are not bisecting.\n")); else strbuf_rtrim(&branch); } else { struct object_id oid; if (repo_get_oid_commit(the_repository, commit, &oid)) return error(_("'%s' is not a valid commit"), commit); strbuf_addstr(&branch, commit); } if (branch.len && !refs_ref_exists(get_main_ref_store(the_repository), 
"BISECT_HEAD")) { struct child_process cmd = CHILD_PROCESS_INIT; cmd.git_cmd = 1; strvec_pushl(&cmd.args, "checkout", "--ignore-other-worktrees", branch.buf, "--", NULL); if (run_command(&cmd)) { error(_("could not check out original" " HEAD '%s'. Try 'git bisect" " reset '."), branch.buf); strbuf_release(&branch); return -1; } } strbuf_release(&branch); return bisect_clean_state(); } static void log_commit(FILE *fp, const char *fmt, const char *state, struct commit *commit) { struct pretty_print_context pp = {0}; struct strbuf commit_msg = STRBUF_INIT; char *label = xstrfmt(fmt, state); repo_format_commit_message(the_repository, commit, "%s", &commit_msg, &pp); fprintf(fp, "# %s: [%s] %s\n", label, oid_to_hex(&commit->object.oid), commit_msg.buf); strbuf_release(&commit_msg); free(label); } static int bisect_write(const char *state, const char *rev, const struct bisect_terms *terms, int nolog) { struct strbuf tag = STRBUF_INIT; struct object_id oid; struct commit *commit; FILE *fp = NULL; int res = 0; if (!strcmp(state, terms->term_bad)) { strbuf_addf(&tag, "refs/bisect/%s", state); } else if (one_of(state, terms->term_good, "skip", NULL)) { strbuf_addf(&tag, "refs/bisect/%s-%s", state, rev); } else { res = error(_("Bad bisect_write argument: %s"), state); goto finish; } if (repo_get_oid(the_repository, rev, &oid)) { res = error(_("couldn't get the oid of the rev '%s'"), rev); goto finish; } if (refs_update_ref(get_main_ref_store(the_repository), NULL, tag.buf, &oid, NULL, 0, UPDATE_REFS_MSG_ON_ERR)) { res = -1; goto finish; } fp = fopen(git_path_bisect_log(), "a"); if (!fp) { res = error_errno(_("couldn't open the file '%s'"), git_path_bisect_log()); goto finish; } commit = lookup_commit_reference(the_repository, &oid); log_commit(fp, "%s", state, commit); if (!nolog) fprintf(fp, "git bisect %s %s\n", state, rev); finish: if (fp) fclose(fp); strbuf_release(&tag); return res; } static int check_and_set_terms(struct bisect_terms *terms, const char *cmd) { int 
has_term_file = !is_empty_or_missing_file(git_path_bisect_terms()); if (one_of(cmd, "skip", "start", "terms", NULL)) return 0; if (has_term_file && strcmp(cmd, terms->term_bad) && strcmp(cmd, terms->term_good)) return error(_("Invalid command: you're currently in a " "%s/%s bisect"), terms->term_bad, terms->term_good); if (!has_term_file) { if (one_of(cmd, "bad", "good", NULL)) { set_terms(terms, "bad", "good"); return write_terms(terms->term_bad, terms->term_good); } if (one_of(cmd, "new", "old", NULL)) { set_terms(terms, "new", "old"); return write_terms(terms->term_bad, terms->term_good); } } return 0; } static int inc_nr(const char *refname UNUSED, const char *referent UNUSED, const struct object_id *oid UNUSED, int flag UNUSED, void *cb_data) { unsigned int *nr = (unsigned int *)cb_data; (*nr)++; return 0; } static const char need_bad_and_good_revision_warning[] = N_("You need to give me at least one %s and %s revision.\n" "You can use \"git bisect %s\" and \"git bisect %s\" for that."); static const char need_bisect_start_warning[] = N_("You need to start by \"git bisect start\".\n" "You then need to give me at least one %s and %s revision.\n" "You can use \"git bisect %s\" and \"git bisect %s\" for that."); static int decide_next(const struct bisect_terms *terms, const char *current_term, int missing_good, int missing_bad) { if (!missing_good && !missing_bad) return 0; if (!current_term) return -1; if (missing_good && !missing_bad && !strcmp(current_term, terms->term_good)) { char *yesno; /* * have bad (or new) but not good (or old). We could bisect * although this is less optimum. */ warning(_("bisecting only with a %s commit"), terms->term_bad); if (!isatty(0)) return 0; /* * TRANSLATORS: Make sure to include [Y] and [n] in your * translation. The program will only accept English input * at this point. */ yesno = git_prompt(_("Are you sure [Y/n]? 
"), PROMPT_ECHO); if (starts_with(yesno, "N") || starts_with(yesno, "n")) return -1; return 0; } if (!is_empty_or_missing_file(git_path_bisect_start())) return error(_(need_bad_and_good_revision_warning), vocab_bad, vocab_good, vocab_bad, vocab_good); else return error(_(need_bisect_start_warning), vocab_good, vocab_bad, vocab_good, vocab_bad); } static void bisect_status(struct bisect_state *state, const struct bisect_terms *terms) { char *bad_ref = xstrfmt("refs/bisect/%s", terms->term_bad); char *good_glob = xstrfmt("%s-*", terms->term_good); if (refs_ref_exists(get_main_ref_store(the_repository), bad_ref)) state->nr_bad = 1; refs_for_each_glob_ref_in(get_main_ref_store(the_repository), inc_nr, good_glob, "refs/bisect/", (void *) &state->nr_good); free(good_glob); free(bad_ref); } __attribute__((format (printf, 1, 2))) static void bisect_log_printf(const char *fmt, ...) { struct strbuf buf = STRBUF_INIT; va_list ap; va_start(ap, fmt); strbuf_vaddf(&buf, fmt, ap); va_end(ap); printf("%s", buf.buf); append_to_file(git_path_bisect_log(), "# %s", buf.buf); strbuf_release(&buf); } static void bisect_print_status(const struct bisect_terms *terms) { struct bisect_state state = { 0 }; bisect_status(&state, terms); /* If we had both, we'd already be started, and shouldn't get here. 
*/ if (state.nr_good && state.nr_bad) return; if (!state.nr_good && !state.nr_bad) bisect_log_printf(_("status: waiting for both good and bad commits\n")); else if (state.nr_good) bisect_log_printf(Q_("status: waiting for bad commit, %d good commit known\n", "status: waiting for bad commit, %d good commits known\n", state.nr_good), state.nr_good); else bisect_log_printf(_("status: waiting for good commit(s), bad commit known\n")); } static int bisect_next_check(const struct bisect_terms *terms, const char *current_term) { struct bisect_state state = { 0 }; bisect_status(&state, terms); return decide_next(terms, current_term, !state.nr_good, !state.nr_bad); } static int get_terms(struct bisect_terms *terms) { struct strbuf str = STRBUF_INIT; FILE *fp = NULL; int res = 0; fp = fopen(git_path_bisect_terms(), "r"); if (!fp) { res = -1; goto finish; } free_terms(terms); strbuf_getline_lf(&str, fp); terms->term_bad = strbuf_detach(&str, NULL); strbuf_getline_lf(&str, fp); terms->term_good = strbuf_detach(&str, NULL); finish: if (fp) fclose(fp); strbuf_release(&str); return res; } static int bisect_terms(struct bisect_terms *terms, const char *option) { if (get_terms(terms)) return error(_("no terms defined")); if (!option) { printf(_("Your current terms are %s for the old state\n" "and %s for the new state.\n"), terms->term_good, terms->term_bad); return 0; } if (one_of(option, "--term-good", "--term-old", NULL)) printf("%s\n", terms->term_good); else if (one_of(option, "--term-bad", "--term-new", NULL)) printf("%s\n", terms->term_bad); else return error(_("invalid argument %s for 'git bisect terms'.\n" "Supported options are: " "--term-good|--term-old and " "--term-bad|--term-new."), option); return 0; } static int bisect_append_log_quoted(const char **argv) { int res = 0; FILE *fp = fopen(git_path_bisect_log(), "a"); struct strbuf orig_args = STRBUF_INIT; if (!fp) return -1; if (fprintf(fp, "git bisect start") < 1) { res = -1; goto finish; } sq_quote_argv(&orig_args, 
argv); if (fprintf(fp, "%s\n", orig_args.buf) < 1) res = -1; finish: fclose(fp); strbuf_release(&orig_args); return res; } static int add_bisect_ref(const char *refname, const char *referent UNUSED, const struct object_id *oid, int flags UNUSED, void *cb) { struct add_bisect_ref_data *data = cb; add_pending_oid(data->revs, refname, oid, data->object_flags); return 0; } static int prepare_revs(struct bisect_terms *terms, struct rev_info *revs) { int res = 0; struct add_bisect_ref_data cb = { revs }; char *good = xstrfmt("%s-*", terms->term_good); /* * We cannot use terms->term_bad directly in * for_each_glob_ref_in() and we have to append a '*' to it, * otherwise for_each_glob_ref_in() will append '/' and '*'. */ char *bad = xstrfmt("%s*", terms->term_bad); /* * It is important to reset the flags used by revision walks * as the previous call to bisect_next_all() in turn * sets up a revision walk. */ reset_revision_walk(); repo_init_revisions(the_repository, revs, NULL); setup_revisions(0, NULL, revs, NULL); refs_for_each_glob_ref_in(get_main_ref_store(the_repository), add_bisect_ref, bad, "refs/bisect/", &cb); cb.object_flags = UNINTERESTING; refs_for_each_glob_ref_in(get_main_ref_store(the_repository), add_bisect_ref, good, "refs/bisect/", &cb); if (prepare_revision_walk(revs)) res = error(_("revision walk setup failed")); free(good); free(bad); return res; } static int bisect_skipped_commits(struct bisect_terms *terms) { int res; FILE *fp = NULL; struct rev_info revs; struct commit *commit; struct pretty_print_context pp = {0}; struct strbuf commit_name = STRBUF_INIT; res = prepare_revs(terms, &revs); if (res) return res; fp = fopen(git_path_bisect_log(), "a"); if (!fp) return error_errno(_("could not open '%s' for appending"), git_path_bisect_log()); if (fprintf(fp, "# only skipped commits left to test\n") < 0) return error_errno(_("failed to write to '%s'"), git_path_bisect_log()); while ((commit = get_revision(&revs)) != NULL) { strbuf_reset(&commit_name); 
repo_format_commit_message(the_repository, commit, "%s", &commit_name, &pp); fprintf(fp, "# possible first %s commit: [%s] %s\n", terms->term_bad, oid_to_hex(&commit->object.oid), commit_name.buf); } /* * Reset the flags used by revision walks in case * there is another revision walk after this one. */ reset_revision_walk(); strbuf_release(&commit_name); release_revisions(&revs); fclose(fp); return 0; } static int bisect_successful(struct bisect_terms *terms) { struct object_id oid; struct commit *commit; struct pretty_print_context pp = {0}; struct strbuf commit_name = STRBUF_INIT; char *bad_ref = xstrfmt("refs/bisect/%s",terms->term_bad); int res; refs_read_ref(get_main_ref_store(the_repository), bad_ref, &oid); commit = lookup_commit_reference_by_name(bad_ref); repo_format_commit_message(the_repository, commit, "%s", &commit_name, &pp); res = append_to_file(git_path_bisect_log(), "# first %s commit: [%s] %s\n", terms->term_bad, oid_to_hex(&commit->object.oid), commit_name.buf); strbuf_release(&commit_name); free(bad_ref); return res; } static enum bisect_error bisect_next(struct bisect_terms *terms, const char *prefix) { enum bisect_error res; if (bisect_autostart(terms)) return BISECT_FAILED; if (bisect_next_check(terms, terms->term_good)) return BISECT_FAILED; /* Perform all bisection computation */ res = bisect_next_all(the_repository, prefix); if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) { res = bisect_successful(terms); return res ? res : BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND; } else if (res == BISECT_ONLY_SKIPPED_LEFT) { res = bisect_skipped_commits(terms); return res ? 
res : BISECT_ONLY_SKIPPED_LEFT; } return res; } static enum bisect_error bisect_auto_next(struct bisect_terms *terms, const char *prefix) { if (bisect_next_check(terms, NULL)) { bisect_print_status(terms); return BISECT_OK; } return bisect_next(terms, prefix); } static enum bisect_error bisect_start(struct bisect_terms *terms, int argc, const char **argv) { int no_checkout = 0; int first_parent_only = 0; int i, has_double_dash = 0, must_write_terms = 0, bad_seen = 0; int flags, pathspec_pos; enum bisect_error res = BISECT_OK; struct string_list revs = STRING_LIST_INIT_DUP; struct string_list states = STRING_LIST_INIT_DUP; struct strbuf start_head = STRBUF_INIT; struct strbuf bisect_names = STRBUF_INIT; struct object_id head_oid; struct object_id oid; const char *head; if (is_bare_repository()) no_checkout = 1; /* * Check for one bad and then some good revisions */ for (i = 0; i < argc; i++) { if (!strcmp(argv[i], "--")) { has_double_dash = 1; break; } } for (i = 0; i < argc; i++) { const char *arg = argv[i]; if (!strcmp(argv[i], "--")) { break; } else if (!strcmp(arg, "--no-checkout")) { no_checkout = 1; } else if (!strcmp(arg, "--first-parent")) { first_parent_only = 1; } else if (!strcmp(arg, "--term-good") || !strcmp(arg, "--term-old")) { i++; if (argc <= i) return error(_("'' is not a valid term")); must_write_terms = 1; free((void *) terms->term_good); terms->term_good = xstrdup(argv[i]); } else if (skip_prefix(arg, "--term-good=", &arg) || skip_prefix(arg, "--term-old=", &arg)) { must_write_terms = 1; free((void *) terms->term_good); terms->term_good = xstrdup(arg); } else if (!strcmp(arg, "--term-bad") || !strcmp(arg, "--term-new")) { i++; if (argc <= i) return error(_("'' is not a valid term")); must_write_terms = 1; free((void *) terms->term_bad); terms->term_bad = xstrdup(argv[i]); } else if (skip_prefix(arg, "--term-bad=", &arg) || skip_prefix(arg, "--term-new=", &arg)) { must_write_terms = 1; free((void *) terms->term_bad); terms->term_bad = 
xstrdup(arg); } else if (starts_with(arg, "--")) { return error(_("unrecognized option: '%s'"), arg); } else if (!get_oidf(&oid, "%s^{commit}", arg)) { string_list_append(&revs, oid_to_hex(&oid)); } else if (has_double_dash) { die(_("'%s' does not appear to be a valid " "revision"), arg); } else { break; } } pathspec_pos = i; /* * The user ran "git bisect start ", hence did not * explicitly specify the terms, but we are already starting to * set references named with the default terms, and won't be able * to change afterwards. */ if (revs.nr) must_write_terms = 1; for (i = 0; i < revs.nr; i++) { if (bad_seen) { string_list_append(&states, terms->term_good); } else { bad_seen = 1; string_list_append(&states, terms->term_bad); } } /* * Verify HEAD */ head = refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", 0, &head_oid, &flags); if (!head) if (repo_get_oid(the_repository, "HEAD", &head_oid)) return error(_("bad HEAD - I need a HEAD")); /* * Check if we are bisecting */ if (!is_empty_or_missing_file(git_path_bisect_start())) { /* Reset to the rev from where we started */ strbuf_read_file(&start_head, git_path_bisect_start(), 0); strbuf_trim(&start_head); if (!no_checkout) { struct child_process cmd = CHILD_PROCESS_INIT; cmd.git_cmd = 1; strvec_pushl(&cmd.args, "checkout", start_head.buf, "--", NULL); if (run_command(&cmd)) { res = error(_("checking out '%s' failed." " Try 'git bisect start " "'."), start_head.buf); goto finish; } } } else { /* Get the rev from where we start. */ if (!repo_get_oid(the_repository, head, &head_oid) && !starts_with(head, "refs/heads/")) { strbuf_reset(&start_head); strbuf_addstr(&start_head, oid_to_hex(&head_oid)); } else if (!repo_get_oid(the_repository, head, &head_oid) && skip_prefix(head, "refs/heads/", &head)) { strbuf_addstr(&start_head, head); } else { return error(_("bad HEAD - strange symbolic ref")); } } /* * Get rid of any old bisect state. 
*/ if (bisect_clean_state()) return BISECT_FAILED; /* * Write new start state */ write_file(git_path_bisect_start(), "%s\n", start_head.buf); if (first_parent_only) write_file(git_path_bisect_first_parent(), "\n"); if (no_checkout) { if (repo_get_oid(the_repository, start_head.buf, &oid) < 0) { res = error(_("invalid ref: '%s'"), start_head.buf); goto finish; } if (refs_update_ref(get_main_ref_store(the_repository), NULL, "BISECT_HEAD", &oid, NULL, 0, UPDATE_REFS_MSG_ON_ERR)) { res = BISECT_FAILED; goto finish; } } if (pathspec_pos < argc - 1) sq_quote_argv(&bisect_names, argv + pathspec_pos); write_file(git_path_bisect_names(), "%s\n", bisect_names.buf); for (i = 0; i < states.nr; i++) if (bisect_write(states.items[i].string, revs.items[i].string, terms, 1)) { res = BISECT_FAILED; goto finish; } if (must_write_terms && write_terms(terms->term_bad, terms->term_good)) { res = BISECT_FAILED; goto finish; } res = bisect_append_log_quoted(argv); if (res) res = BISECT_FAILED; finish: string_list_clear(&revs, 0); string_list_clear(&states, 0); strbuf_release(&start_head); strbuf_release(&bisect_names); if (res) return res; res = bisect_auto_next(terms, NULL); if (!is_bisect_success(res)) bisect_clean_state(); return res; } static inline int file_is_not_empty(const char *path) { return !is_empty_or_missing_file(path); } static int bisect_autostart(struct bisect_terms *terms) { int res; const char *yesno; if (file_is_not_empty(git_path_bisect_start())) return 0; fprintf_ln(stderr, _("You need to start by \"git bisect " "start\"\n")); if (!isatty(STDIN_FILENO)) return -1; /* * TRANSLATORS: Make sure to include [Y] and [n] in your * translation. The program will only accept English input * at this point. */ yesno = git_prompt(_("Do you want me to do it for you " "[Y/n]? "), PROMPT_ECHO); res = tolower(*yesno) == 'n' ? 
-1 : bisect_start(terms, 0, empty_strvec); return res; } static enum bisect_error bisect_state(struct bisect_terms *terms, int argc, const char **argv) { const char *state; int i, verify_expected = 1; struct object_id oid, expected; struct oid_array revs = OID_ARRAY_INIT; if (!argc) return error(_("Please call `--bisect-state` with at least one argument")); if (bisect_autostart(terms)) return BISECT_FAILED; state = argv[0]; if (check_and_set_terms(terms, state) || !one_of(state, terms->term_good, terms->term_bad, "skip", NULL)) return BISECT_FAILED; argv++; argc--; if (argc > 1 && !strcmp(state, terms->term_bad)) return error(_("'git bisect %s' can take only one argument."), terms->term_bad); if (argc == 0) { const char *head = "BISECT_HEAD"; enum get_oid_result res_head = repo_get_oid(the_repository, head, &oid); if (res_head == MISSING_OBJECT) { head = "HEAD"; res_head = repo_get_oid(the_repository, head, &oid); } if (res_head) error(_("Bad rev input: %s"), head); oid_array_append(&revs, &oid); } /* * All input revs must be checked before executing bisect_write() * to discard junk revs. 
*/ for (; argc; argc--, argv++) { struct commit *commit; if (repo_get_oid(the_repository, *argv, &oid)){ error(_("Bad rev input: %s"), *argv); oid_array_clear(&revs); return BISECT_FAILED; } commit = lookup_commit_reference(the_repository, &oid); if (!commit) die(_("Bad rev input (not a commit): %s"), *argv); oid_array_append(&revs, &commit->object.oid); } if (refs_read_ref(get_main_ref_store(the_repository), "BISECT_EXPECTED_REV", &expected)) verify_expected = 0; /* Ignore invalid file contents */ for (i = 0; i < revs.nr; i++) { if (bisect_write(state, oid_to_hex(&revs.oid[i]), terms, 0)) { oid_array_clear(&revs); return BISECT_FAILED; } if (verify_expected && !oideq(&revs.oid[i], &expected)) { unlink_or_warn(git_path_bisect_ancestors_ok()); refs_delete_ref(get_main_ref_store(the_repository), NULL, "BISECT_EXPECTED_REV", NULL, REF_NO_DEREF); verify_expected = 0; } } oid_array_clear(&revs); return bisect_auto_next(terms, NULL); } static enum bisect_error bisect_log(void) { int fd, status; const char* filename = git_path_bisect_log(); if (is_empty_or_missing_file(filename)) return error(_("We are not bisecting.")); fd = open(filename, O_RDONLY); if (fd < 0) return BISECT_FAILED; status = copy_fd(fd, STDOUT_FILENO); close(fd); return status ? 
BISECT_FAILED : BISECT_OK; } static int process_replay_line(struct bisect_terms *terms, struct strbuf *line) { const char *p = line->buf + strspn(line->buf, " \t"); char *word_end, *rev; if ((!skip_prefix(p, "git bisect", &p) && !skip_prefix(p, "git-bisect", &p)) || !isspace(*p)) return 0; p += strspn(p, " \t"); word_end = (char *)p + strcspn(p, " \t"); rev = word_end + strspn(word_end, " \t"); *word_end = '\0'; /* NUL-terminate the word */ get_terms(terms); if (check_and_set_terms(terms, p)) return -1; if (!strcmp(p, "start")) { struct strvec argv = STRVEC_INIT; int res; sq_dequote_to_strvec(rev, &argv); res = bisect_start(terms, argv.nr, argv.v); strvec_clear(&argv); return res; } if (one_of(p, terms->term_good, terms->term_bad, "skip", NULL)) return bisect_write(p, rev, terms, 0); if (!strcmp(p, "terms")) { struct strvec argv = STRVEC_INIT; int res; sq_dequote_to_strvec(rev, &argv); res = bisect_terms(terms, argv.nr == 1 ? argv.v[0] : NULL); strvec_clear(&argv); return res; } error(_("'%s'?? 
what are you talking about?"), p); return -1; } static enum bisect_error bisect_replay(struct bisect_terms *terms, const char *filename) { FILE *fp = NULL; enum bisect_error res = BISECT_OK; struct strbuf line = STRBUF_INIT; if (is_empty_or_missing_file(filename)) return error(_("cannot read file '%s' for replaying"), filename); if (bisect_reset(NULL)) return BISECT_FAILED; fp = fopen(filename, "r"); if (!fp) return BISECT_FAILED; while ((strbuf_getline(&line, fp) != EOF) && !res) res = process_replay_line(terms, &line); strbuf_release(&line); fclose(fp); if (res) return BISECT_FAILED; return bisect_auto_next(terms, NULL); } static enum bisect_error bisect_skip(struct bisect_terms *terms, int argc, const char **argv) { int i; enum bisect_error res; struct strvec argv_state = STRVEC_INIT; strvec_push(&argv_state, "skip"); for (i = 0; i < argc; i++) { const char *dotdot = strstr(argv[i], ".."); if (dotdot) { struct rev_info revs; struct commit *commit; repo_init_revisions(the_repository, &revs, NULL); setup_revisions(2, argv + i - 1, &revs, NULL); if (prepare_revision_walk(&revs)) die(_("revision walk setup failed")); while ((commit = get_revision(&revs)) != NULL) strvec_push(&argv_state, oid_to_hex(&commit->object.oid)); reset_revision_walk(); release_revisions(&revs); } else { strvec_push(&argv_state, argv[i]); } } res = bisect_state(terms, argv_state.nr, argv_state.v); strvec_clear(&argv_state); return res; } static int bisect_visualize(struct bisect_terms *terms, int argc, const char **argv) { struct child_process cmd = CHILD_PROCESS_INIT; struct strbuf sb = STRBUF_INIT; if (bisect_next_check(terms, NULL) != 0) return BISECT_FAILED; cmd.no_stdin = 1; if (!argc) { if ((getenv("DISPLAY") || getenv("SESSIONNAME") || getenv("MSYSTEM") || getenv("SECURITYSESSIONID")) && exists_in_PATH("gitk")) { strvec_push(&cmd.args, "gitk"); } else { strvec_push(&cmd.args, "log"); cmd.git_cmd = 1; } } else { if (argv[0][0] == '-') { strvec_push(&cmd.args, "log"); cmd.git_cmd = 1; } 
else if (strcmp(argv[0], "tig") && !starts_with(argv[0], "git")) cmd.git_cmd = 1; strvec_pushv(&cmd.args, argv); } strvec_pushl(&cmd.args, "--bisect", "--", NULL); strbuf_read_file(&sb, git_path_bisect_names(), 0); sq_dequote_to_strvec(sb.buf, &cmd.args); strbuf_release(&sb); return run_command(&cmd); } static int get_first_good(const char *refname UNUSED, const char *referent UNUSED, const struct object_id *oid, int flag UNUSED, void *cb_data) { oidcpy(cb_data, oid); return 1; } static int do_bisect_run(const char *command) { struct child_process cmd = CHILD_PROCESS_INIT; printf(_("running %s\n"), command); cmd.use_shell = 1; strvec_push(&cmd.args, command); return run_command(&cmd); } static int verify_good(const struct bisect_terms *terms, const char *command) { int rc; enum bisect_error res; struct object_id good_rev; struct object_id current_rev; char *good_glob = xstrfmt("%s-*", terms->term_good); int no_checkout = refs_ref_exists(get_main_ref_store(the_repository), "BISECT_HEAD"); refs_for_each_glob_ref_in(get_main_ref_store(the_repository), get_first_good, good_glob, "refs/bisect/", &good_rev); free(good_glob); if (refs_read_ref(get_main_ref_store(the_repository), no_checkout ? 
"BISECT_HEAD" : "HEAD", ¤t_rev)) return -1; res = bisect_checkout(&good_rev, no_checkout); if (res != BISECT_OK) return -1; rc = do_bisect_run(command); res = bisect_checkout(¤t_rev, no_checkout); if (res != BISECT_OK) return -1; return rc; } static int bisect_run(struct bisect_terms *terms, int argc, const char **argv) { int res = BISECT_OK; struct strbuf command = STRBUF_INIT; const char *new_state; int temporary_stdout_fd, saved_stdout; int is_first_run = 1; if (bisect_next_check(terms, NULL)) return BISECT_FAILED; if (!argc) { error(_("bisect run failed: no command provided.")); return BISECT_FAILED; } sq_quote_argv(&command, argv); strbuf_ltrim(&command); while (1) { res = do_bisect_run(command.buf); /* * Exit code 126 and 127 can either come from the shell * if it was unable to execute or even find the script, * or from the script itself. Check with a known-good * revision to avoid trashing the bisect run due to a * missing or non-executable script. */ if (is_first_run && (res == 126 || res == 127)) { int rc = verify_good(terms, command.buf); is_first_run = 0; if (rc < 0 || 128 <= rc) { error(_("unable to verify %s on good" " revision"), command.buf); res = BISECT_FAILED; break; } if (rc == res) { error(_("bogus exit code %d for good revision"), rc); res = BISECT_FAILED; break; } } if (res < 0 || 128 <= res) { error(_("bisect run failed: exit code %d from" " %s is < 0 or >= 128"), res, command.buf); break; } if (res == 125) new_state = "skip"; else if (!res) new_state = terms->term_good; else new_state = terms->term_bad; temporary_stdout_fd = open(git_path_bisect_run(), O_CREAT | O_WRONLY | O_TRUNC, 0666); if (temporary_stdout_fd < 0) { res = error_errno(_("cannot open file '%s' for writing"), git_path_bisect_run()); break; } fflush(stdout); saved_stdout = dup(1); dup2(temporary_stdout_fd, 1); res = bisect_state(terms, 1, &new_state); fflush(stdout); dup2(saved_stdout, 1); close(saved_stdout); close(temporary_stdout_fd); 
print_file_to_stdout(git_path_bisect_run()); if (res == BISECT_ONLY_SKIPPED_LEFT) error(_("bisect run cannot continue any more")); else if (res == BISECT_INTERNAL_SUCCESS_MERGE_BASE) { puts(_("bisect run success")); res = BISECT_OK; } else if (res == BISECT_INTERNAL_SUCCESS_1ST_BAD_FOUND) { puts(_("bisect found first bad commit")); res = BISECT_OK; } else if (res) { error(_("bisect run failed: 'git bisect %s'" " exited with error code %d"), new_state, res); } else { continue; } break; } strbuf_release(&command); return res; } static int cmd_bisect__reset(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { if (argc > 1) return error(_("'%s' requires either no argument or a commit"), "git bisect reset"); return bisect_reset(argc ? argv[0] : NULL); } static int cmd_bisect__terms(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; if (argc > 1) return error(_("'%s' requires 0 or 1 argument"), "git bisect terms"); res = bisect_terms(&terms, argc == 1 ? 
argv[0] : NULL); free_terms(&terms); return res; } static int cmd_bisect__start(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; set_terms(&terms, "bad", "good"); res = bisect_start(&terms, argc, argv); free_terms(&terms); return res; } static int cmd_bisect__next(int argc, const char **argv UNUSED, const char *prefix, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; if (argc) return error(_("'%s' requires 0 arguments"), "git bisect next"); get_terms(&terms); res = bisect_next(&terms, prefix); free_terms(&terms); return res; } static int cmd_bisect__log(int argc UNUSED, const char **argv UNUSED, const char *prefix UNUSED, struct repository *repo UNUSED) { return bisect_log(); } static int cmd_bisect__replay(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; if (argc != 1) return error(_("no logfile given")); set_terms(&terms, "bad", "good"); res = bisect_replay(&terms, argv[0]); free_terms(&terms); return res; } static int cmd_bisect__skip(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; set_terms(&terms, "bad", "good"); get_terms(&terms); res = bisect_skip(&terms, argc, argv); free_terms(&terms); return res; } static int cmd_bisect__visualize(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; get_terms(&terms); res = bisect_visualize(&terms, argc, argv); free_terms(&terms); return res; } static int cmd_bisect__run(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED) { int res; struct bisect_terms terms = { 0 }; if (!argc) return error(_("'%s' failed: no command provided."), "git bisect run"); get_terms(&terms); res = bisect_run(&terms, argc, argv); free_terms(&terms); 
return res; } int cmd_bisect(int argc, const char **argv, const char *prefix, struct repository *repo) { int res = 0; parse_opt_subcommand_fn *fn = NULL; struct option options[] = { OPT_SUBCOMMAND("reset", &fn, cmd_bisect__reset), OPT_SUBCOMMAND("terms", &fn, cmd_bisect__terms), OPT_SUBCOMMAND("start", &fn, cmd_bisect__start), OPT_SUBCOMMAND("next", &fn, cmd_bisect__next), OPT_SUBCOMMAND("log", &fn, cmd_bisect__log), OPT_SUBCOMMAND("replay", &fn, cmd_bisect__replay), OPT_SUBCOMMAND("skip", &fn, cmd_bisect__skip), OPT_SUBCOMMAND("visualize", &fn, cmd_bisect__visualize), OPT_SUBCOMMAND("view", &fn, cmd_bisect__visualize), OPT_SUBCOMMAND("run", &fn, cmd_bisect__run), OPT_END() }; argc = parse_options(argc, argv, prefix, options, git_bisect_usage, PARSE_OPT_SUBCOMMAND_OPTIONAL); if (!fn) { struct bisect_terms terms = { 0 }; if (!argc) usage_msg_opt(_("need a command"), git_bisect_usage, options); set_terms(&terms, "bad", "good"); get_terms(&terms); if (check_and_set_terms(&terms, argv[0])) usage_msg_optf(_("unknown command: '%s'"), git_bisect_usage, options, argv[0]); res = bisect_state(&terms, argc, argv); free_terms(&terms); } else { argc--; argv++; res = fn(argc, argv, prefix, repo); } return is_bisect_success(res) ? 
0 : -res; } git-cinnabar-0.7.0/git-core/builtin/blame.c000064400000000000000000001043411046102023000165740ustar 00000000000000/* * Blame * * Copyright (c) 2006, 2014 by its authors * See COPYING for licensing conditions */ #define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "config.h" #include "color.h" #include "builtin.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "commit.h" #include "diff.h" #include "revision.h" #include "quote.h" #include "string-list.h" #include "mailmap.h" #include "parse-options.h" #include "prio-queue.h" #include "utf8.h" #include "userdiff.h" #include "line-range.h" #include "line-log.h" #include "progress.h" #include "object-name.h" #include "object-store-ll.h" #include "pager.h" #include "blame.h" #include "refs.h" #include "setup.h" #include "tag.h" #include "write-or-die.h" static char blame_usage[] = N_("git blame [] [] [] [--] "); static char annotate_usage[] = N_("git annotate [] [] [] [--] "); static const char *blame_opt_usage[] = { blame_usage, "", N_(" are documented in git-rev-list(1)"), NULL }; static const char *annotate_opt_usage[] = { annotate_usage, "", N_(" are documented in git-rev-list(1)"), NULL }; static int longest_file; static int longest_author; static int max_orig_digits; static int max_digits; static int max_score_digits; static int show_root; static int reverse; static int blank_boundary; static int incremental; static int xdl_opts; static int abbrev = -1; static int no_whole_file_rename; static int show_progress; static char repeated_meta_color[COLOR_MAXLEN]; static int coloring_mode; static struct string_list ignore_revs_file_list = STRING_LIST_INIT_DUP; static int mark_unblamable_lines; static int mark_ignored_lines; static struct date_mode blame_date_mode = { DATE_ISO8601 }; static size_t blame_date_width; static struct string_list mailmap = STRING_LIST_INIT_NODUP; #ifndef DEBUG_BLAME #define DEBUG_BLAME 0 #endif static unsigned blame_move_score; static unsigned 
blame_copy_score; /* Remember to update object flag allocation in object.h */ #define METAINFO_SHOWN (1u<<12) #define MORE_THAN_ONE_PATH (1u<<13) struct progress_info { struct progress *progress; int blamed_lines; }; static const char *nth_line_cb(void *data, long lno) { return blame_nth_line((struct blame_scoreboard *)data, lno); } /* * Information on commits, used for output. */ struct commit_info { struct strbuf author; struct strbuf author_mail; timestamp_t author_time; struct strbuf author_tz; /* filled only when asked for details */ struct strbuf committer; struct strbuf committer_mail; timestamp_t committer_time; struct strbuf committer_tz; struct strbuf summary; }; #define COMMIT_INFO_INIT { \ .author = STRBUF_INIT, \ .author_mail = STRBUF_INIT, \ .author_tz = STRBUF_INIT, \ .committer = STRBUF_INIT, \ .committer_mail = STRBUF_INIT, \ .committer_tz = STRBUF_INIT, \ .summary = STRBUF_INIT, \ } /* * Parse author/committer line in the commit object buffer */ static void get_ac_line(const char *inbuf, const char *what, struct strbuf *name, struct strbuf *mail, timestamp_t *time, struct strbuf *tz) { struct ident_split ident; size_t len, maillen, namelen; const char *tmp, *endp; const char *namebuf, *mailbuf; tmp = strstr(inbuf, what); if (!tmp) goto error_out; tmp += strlen(what); endp = strchr(tmp, '\n'); if (!endp) len = strlen(tmp); else len = endp - tmp; if (split_ident_line(&ident, tmp, len)) { error_out: /* Ugh */ tmp = "(unknown)"; strbuf_addstr(name, tmp); strbuf_addstr(mail, tmp); strbuf_addstr(tz, tmp); *time = 0; return; } namelen = ident.name_end - ident.name_begin; namebuf = ident.name_begin; maillen = ident.mail_end - ident.mail_begin; mailbuf = ident.mail_begin; if (ident.date_begin && ident.date_end) *time = strtoul(ident.date_begin, NULL, 10); else *time = 0; if (ident.tz_begin && ident.tz_end) strbuf_add(tz, ident.tz_begin, ident.tz_end - ident.tz_begin); else strbuf_addstr(tz, "(unknown)"); /* * Now, convert both name and e-mail using mailmap 
*/ map_user(&mailmap, &mailbuf, &maillen, &namebuf, &namelen); strbuf_addf(mail, "<%.*s>", (int)maillen, mailbuf); strbuf_add(name, namebuf, namelen); } static void commit_info_destroy(struct commit_info *ci) { strbuf_release(&ci->author); strbuf_release(&ci->author_mail); strbuf_release(&ci->author_tz); strbuf_release(&ci->committer); strbuf_release(&ci->committer_mail); strbuf_release(&ci->committer_tz); strbuf_release(&ci->summary); } static void get_commit_info(struct commit *commit, struct commit_info *ret, int detailed) { int len; const char *subject, *encoding; const char *message; encoding = get_log_output_encoding(); message = repo_logmsg_reencode(the_repository, commit, NULL, encoding); get_ac_line(message, "\nauthor ", &ret->author, &ret->author_mail, &ret->author_time, &ret->author_tz); if (!detailed) { repo_unuse_commit_buffer(the_repository, commit, message); return; } get_ac_line(message, "\ncommitter ", &ret->committer, &ret->committer_mail, &ret->committer_time, &ret->committer_tz); len = find_commit_subject(message, &subject); if (len) strbuf_add(&ret->summary, subject, len); else strbuf_addf(&ret->summary, "(%s)", oid_to_hex(&commit->object.oid)); repo_unuse_commit_buffer(the_repository, commit, message); } /* * Write out any suspect information which depends on the path. This must be * handled separately from emit_one_suspect_detail(), because a given commit * may have changes in multiple paths. So this needs to appear each time * we mention a new group. * * To allow LF and other nonportable characters in pathnames, * they are c-style quoted as needed. */ static void write_filename_info(struct blame_origin *suspect) { if (suspect->previous) { struct blame_origin *prev = suspect->previous; printf("previous %s ", oid_to_hex(&prev->commit->object.oid)); write_name_quoted(prev->path, stdout, '\n'); } printf("filename "); write_name_quoted(suspect->path, stdout, '\n'); } /* * Porcelain/Incremental format wants to show a lot of details per * commit. 
Instead of repeating this every line, emit it only once, * the first time each commit appears in the output (unless the * user has specifically asked for us to repeat). */ static int emit_one_suspect_detail(struct blame_origin *suspect, int repeat) { struct commit_info ci = COMMIT_INFO_INIT; if (!repeat && (suspect->commit->object.flags & METAINFO_SHOWN)) return 0; suspect->commit->object.flags |= METAINFO_SHOWN; get_commit_info(suspect->commit, &ci, 1); printf("author %s\n", ci.author.buf); printf("author-mail %s\n", ci.author_mail.buf); printf("author-time %"PRItime"\n", ci.author_time); printf("author-tz %s\n", ci.author_tz.buf); printf("committer %s\n", ci.committer.buf); printf("committer-mail %s\n", ci.committer_mail.buf); printf("committer-time %"PRItime"\n", ci.committer_time); printf("committer-tz %s\n", ci.committer_tz.buf); printf("summary %s\n", ci.summary.buf); if (suspect->commit->object.flags & UNINTERESTING) printf("boundary\n"); commit_info_destroy(&ci); return 1; } /* * The blame_entry is found to be guilty for the range. * Show it in incremental output. 
*/ static void found_guilty_entry(struct blame_entry *ent, void *data) { struct progress_info *pi = (struct progress_info *)data; if (incremental) { struct blame_origin *suspect = ent->suspect; printf("%s %d %d %d\n", oid_to_hex(&suspect->commit->object.oid), ent->s_lno + 1, ent->lno + 1, ent->num_lines); emit_one_suspect_detail(suspect, 0); write_filename_info(suspect); maybe_flush_or_die(stdout, "stdout"); } pi->blamed_lines += ent->num_lines; display_progress(pi->progress, pi->blamed_lines); } static const char *format_time(timestamp_t time, const char *tz_str, int show_raw_time) { static struct strbuf time_buf = STRBUF_INIT; strbuf_reset(&time_buf); if (show_raw_time) { strbuf_addf(&time_buf, "%"PRItime" %s", time, tz_str); } else { const char *time_str; size_t time_width; int tz; tz = atoi(tz_str); time_str = show_date(time, tz, blame_date_mode); strbuf_addstr(&time_buf, time_str); /* * Add space paddings to time_buf to display a fixed width * string, and use time_width for display width calibration. 
*/ for (time_width = utf8_strwidth(time_str); time_width < blame_date_width; time_width++) strbuf_addch(&time_buf, ' '); } return time_buf.buf; } #define OUTPUT_ANNOTATE_COMPAT (1U<<0) #define OUTPUT_LONG_OBJECT_NAME (1U<<1) #define OUTPUT_RAW_TIMESTAMP (1U<<2) #define OUTPUT_PORCELAIN (1U<<3) #define OUTPUT_SHOW_NAME (1U<<4) #define OUTPUT_SHOW_NUMBER (1U<<5) #define OUTPUT_SHOW_SCORE (1U<<6) #define OUTPUT_NO_AUTHOR (1U<<7) #define OUTPUT_SHOW_EMAIL (1U<<8) #define OUTPUT_LINE_PORCELAIN (1U<<9) #define OUTPUT_COLOR_LINE (1U<<10) #define OUTPUT_SHOW_AGE_WITH_COLOR (1U<<11) static void emit_porcelain_details(struct blame_origin *suspect, int repeat) { if (emit_one_suspect_detail(suspect, repeat) || (suspect->commit->object.flags & MORE_THAN_ONE_PATH)) write_filename_info(suspect); } static void emit_porcelain(struct blame_scoreboard *sb, struct blame_entry *ent, int opt) { int repeat = opt & OUTPUT_LINE_PORCELAIN; int cnt; const char *cp; struct blame_origin *suspect = ent->suspect; char hex[GIT_MAX_HEXSZ + 1]; oid_to_hex_r(hex, &suspect->commit->object.oid); printf("%s %d %d %d\n", hex, ent->s_lno + 1, ent->lno + 1, ent->num_lines); emit_porcelain_details(suspect, repeat); cp = blame_nth_line(sb, ent->lno); for (cnt = 0; cnt < ent->num_lines; cnt++) { char ch; if (cnt) { printf("%s %d %d\n", hex, ent->s_lno + 1 + cnt, ent->lno + 1 + cnt); if (repeat) emit_porcelain_details(suspect, 1); } putchar('\t'); do { ch = *cp++; putchar(ch); } while (ch != '\n' && cp < sb->final_buf + sb->final_buf_size); } if (sb->final_buf_size && cp[-1] != '\n') putchar('\n'); } static struct color_field { timestamp_t hop; char col[COLOR_MAXLEN]; } *colorfield; static int colorfield_nr, colorfield_alloc; static void parse_color_fields(const char *s) { struct string_list l = STRING_LIST_INIT_DUP; struct string_list_item *item; enum { EXPECT_DATE, EXPECT_COLOR } next = EXPECT_COLOR; colorfield_nr = 0; /* Ideally this would be stripped and split at the same time? 
*/ string_list_split(&l, s, ',', -1); ALLOC_GROW(colorfield, colorfield_nr + 1, colorfield_alloc); for_each_string_list_item(item, &l) { switch (next) { case EXPECT_DATE: colorfield[colorfield_nr].hop = approxidate(item->string); next = EXPECT_COLOR; colorfield_nr++; ALLOC_GROW(colorfield, colorfield_nr + 1, colorfield_alloc); break; case EXPECT_COLOR: if (color_parse(item->string, colorfield[colorfield_nr].col)) die(_("expecting a color: %s"), item->string); next = EXPECT_DATE; break; } } if (next == EXPECT_COLOR) die(_("must end with a color")); colorfield[colorfield_nr].hop = TIME_MAX; string_list_clear(&l, 0); } static void setup_default_color_by_age(void) { parse_color_fields("blue,12 month ago,white,1 month ago,red"); } static void determine_line_heat(struct commit_info *ci, const char **dest_color) { int i = 0; while (i < colorfield_nr && ci->author_time > colorfield[i].hop) i++; *dest_color = colorfield[i].col; } static void emit_other(struct blame_scoreboard *sb, struct blame_entry *ent, int opt) { int cnt; const char *cp; struct blame_origin *suspect = ent->suspect; struct commit_info ci = COMMIT_INFO_INIT; char hex[GIT_MAX_HEXSZ + 1]; int show_raw_time = !!(opt & OUTPUT_RAW_TIMESTAMP); const char *default_color = NULL, *color = NULL, *reset = NULL; get_commit_info(suspect->commit, &ci, 1); oid_to_hex_r(hex, &suspect->commit->object.oid); cp = blame_nth_line(sb, ent->lno); if (opt & OUTPUT_SHOW_AGE_WITH_COLOR) { determine_line_heat(&ci, &default_color); color = default_color; reset = GIT_COLOR_RESET; } if (abbrev < MINIMUM_ABBREV) BUG("abbreviation is smaller than minimum length: %d < %d", abbrev, MINIMUM_ABBREV); for (cnt = 0; cnt < ent->num_lines; cnt++) { char ch; size_t length = (opt & OUTPUT_LONG_OBJECT_NAME) ? the_hash_algo->hexsz : (size_t) abbrev; if (opt & OUTPUT_COLOR_LINE) { if (cnt > 0) { color = repeated_meta_color; reset = GIT_COLOR_RESET; } else { color = default_color ? default_color : NULL; reset = default_color ? 
GIT_COLOR_RESET : NULL; } } if (color) fputs(color, stdout); if (suspect->commit->object.flags & UNINTERESTING) { if (blank_boundary) { memset(hex, ' ', strlen(hex)); } else if (!(opt & OUTPUT_ANNOTATE_COMPAT)) { length--; putchar('^'); } } if (mark_unblamable_lines && ent->unblamable) { length--; putchar('*'); } if (mark_ignored_lines && ent->ignored) { length--; putchar('?'); } printf("%.*s", (int)(length < GIT_MAX_HEXSZ ? length : GIT_MAX_HEXSZ), hex); if (opt & OUTPUT_ANNOTATE_COMPAT) { const char *name; if (opt & OUTPUT_SHOW_EMAIL) name = ci.author_mail.buf; else name = ci.author.buf; printf("\t(%10s\t%10s\t%d)", name, format_time(ci.author_time, ci.author_tz.buf, show_raw_time), ent->lno + 1 + cnt); } else { if (opt & OUTPUT_SHOW_SCORE) printf(" %*d %02d", max_score_digits, ent->score, ent->suspect->refcnt); if (opt & OUTPUT_SHOW_NAME) printf(" %-*.*s", longest_file, longest_file, suspect->path); if (opt & OUTPUT_SHOW_NUMBER) printf(" %*d", max_orig_digits, ent->s_lno + 1 + cnt); if (!(opt & OUTPUT_NO_AUTHOR)) { const char *name; int pad; if (opt & OUTPUT_SHOW_EMAIL) name = ci.author_mail.buf; else name = ci.author.buf; pad = longest_author - utf8_strwidth(name); printf(" (%s%*s %10s", name, pad, "", format_time(ci.author_time, ci.author_tz.buf, show_raw_time)); } printf(" %*d) ", max_digits, ent->lno + 1 + cnt); } if (reset) fputs(reset, stdout); do { ch = *cp++; putchar(ch); } while (ch != '\n' && cp < sb->final_buf + sb->final_buf_size); } if (sb->final_buf_size && cp[-1] != '\n') putchar('\n'); commit_info_destroy(&ci); } static void output(struct blame_scoreboard *sb, int option) { struct blame_entry *ent; if (option & OUTPUT_PORCELAIN) { for (ent = sb->ent; ent; ent = ent->next) { int count = 0; struct blame_origin *suspect; struct commit *commit = ent->suspect->commit; if (commit->object.flags & MORE_THAN_ONE_PATH) continue; for (suspect = get_blame_suspects(commit); suspect; suspect = suspect->next) { if (suspect->guilty && count++) { 
commit->object.flags |= MORE_THAN_ONE_PATH; break; } } } } for (ent = sb->ent; ent; ent = ent->next) { if (option & OUTPUT_PORCELAIN) emit_porcelain(sb, ent, option); else { emit_other(sb, ent, option); } } } /* * Add phony grafts for use with -S; this is primarily to * support git's cvsserver that wants to give a linear history * to its clients. */ static int read_ancestry(const char *graft_file) { FILE *fp = fopen_or_warn(graft_file, "r"); struct strbuf buf = STRBUF_INIT; if (!fp) return -1; while (!strbuf_getwholeline(&buf, fp, '\n')) { /* The format is just "Commit Parent1 Parent2 ...\n" */ struct commit_graft *graft = read_graft_line(&buf); if (graft) register_commit_graft(the_repository, graft, 0); } fclose(fp); strbuf_release(&buf); return 0; } static int update_auto_abbrev(int auto_abbrev, struct blame_origin *suspect) { const char *uniq = repo_find_unique_abbrev(the_repository, &suspect->commit->object.oid, auto_abbrev); int len = strlen(uniq); if (auto_abbrev < len) return len; return auto_abbrev; } /* * How many columns do we need to show line numbers, authors, * and filenames? 
*/ static void find_alignment(struct blame_scoreboard *sb, int *option) { int longest_src_lines = 0; int longest_dst_lines = 0; unsigned largest_score = 0; struct blame_entry *e; int compute_auto_abbrev = (abbrev < 0); int auto_abbrev = DEFAULT_ABBREV; for (e = sb->ent; e; e = e->next) { struct blame_origin *suspect = e->suspect; int num; if (compute_auto_abbrev) auto_abbrev = update_auto_abbrev(auto_abbrev, suspect); if (strcmp(suspect->path, sb->path)) *option |= OUTPUT_SHOW_NAME; num = strlen(suspect->path); if (longest_file < num) longest_file = num; if (!(suspect->commit->object.flags & METAINFO_SHOWN)) { struct commit_info ci = COMMIT_INFO_INIT; suspect->commit->object.flags |= METAINFO_SHOWN; get_commit_info(suspect->commit, &ci, 1); if (*option & OUTPUT_SHOW_EMAIL) num = utf8_strwidth(ci.author_mail.buf); else num = utf8_strwidth(ci.author.buf); if (longest_author < num) longest_author = num; commit_info_destroy(&ci); } num = e->s_lno + e->num_lines; if (longest_src_lines < num) longest_src_lines = num; num = e->lno + e->num_lines; if (longest_dst_lines < num) longest_dst_lines = num; if (largest_score < blame_entry_score(sb, e)) largest_score = blame_entry_score(sb, e); } max_orig_digits = decimal_width(longest_src_lines); max_digits = decimal_width(longest_dst_lines); max_score_digits = decimal_width(largest_score); if (compute_auto_abbrev) /* one more abbrev length is needed for the boundary commit */ abbrev = auto_abbrev + 1; } static void sanity_check_on_fail(struct blame_scoreboard *sb, int baa) { int opt = OUTPUT_SHOW_SCORE | OUTPUT_SHOW_NUMBER | OUTPUT_SHOW_NAME; find_alignment(sb, &opt); output(sb, opt); die("Baa %d!", baa); } static unsigned parse_score(const char *arg) { char *end; unsigned long score = strtoul(arg, &end, 10); if (*end) return 0; return score; } static char *add_prefix(const char *prefix, const char *path) { return prefix_path(prefix, prefix ? 
strlen(prefix) : 0, path); } static int git_blame_config(const char *var, const char *value, const struct config_context *ctx, void *cb) { if (!strcmp(var, "blame.showroot")) { show_root = git_config_bool(var, value); return 0; } if (!strcmp(var, "blame.blankboundary")) { blank_boundary = git_config_bool(var, value); return 0; } if (!strcmp(var, "blame.showemail")) { int *output_option = cb; if (git_config_bool(var, value)) *output_option |= OUTPUT_SHOW_EMAIL; else *output_option &= ~OUTPUT_SHOW_EMAIL; return 0; } if (!strcmp(var, "blame.date")) { if (!value) return config_error_nonbool(var); parse_date_format(value, &blame_date_mode); return 0; } if (!strcmp(var, "blame.ignorerevsfile")) { char *str; int ret; ret = git_config_pathname(&str, var, value); if (ret) return ret; string_list_insert(&ignore_revs_file_list, str); free(str); return 0; } if (!strcmp(var, "blame.markunblamablelines")) { mark_unblamable_lines = git_config_bool(var, value); return 0; } if (!strcmp(var, "blame.markignoredlines")) { mark_ignored_lines = git_config_bool(var, value); return 0; } if (!strcmp(var, "color.blame.repeatedlines")) { if (color_parse_mem(value, strlen(value), repeated_meta_color)) warning(_("invalid value for '%s': '%s'"), "color.blame.repeatedLines", value); return 0; } if (!strcmp(var, "color.blame.highlightrecent")) { parse_color_fields(value); return 0; } if (!strcmp(var, "blame.coloring")) { if (!value) return config_error_nonbool(var); if (!strcmp(value, "repeatedLines")) { coloring_mode |= OUTPUT_COLOR_LINE; } else if (!strcmp(value, "highlightRecent")) { coloring_mode |= OUTPUT_SHOW_AGE_WITH_COLOR; } else if (!strcmp(value, "none")) { coloring_mode &= ~(OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR); } else { warning(_("invalid value for '%s': '%s'"), "blame.coloring", value); return 0; } } if (git_diff_heuristic_config(var, value, cb) < 0) return -1; if (userdiff_config(var, value) < 0) return -1; return git_default_config(var, value, ctx, cb); } static int 
blame_copy_callback(const struct option *option, const char *arg, int unset) { int *opt = option->value; BUG_ON_OPT_NEG(unset); /* * -C enables copy from removed files; * -C -C enables copy from existing files, but only * when blaming a new file; * -C -C -C enables copy from existing files for * everybody */ if (*opt & PICKAXE_BLAME_COPY_HARDER) *opt |= PICKAXE_BLAME_COPY_HARDEST; if (*opt & PICKAXE_BLAME_COPY) *opt |= PICKAXE_BLAME_COPY_HARDER; *opt |= PICKAXE_BLAME_COPY | PICKAXE_BLAME_MOVE; if (arg) blame_copy_score = parse_score(arg); return 0; } static int blame_move_callback(const struct option *option, const char *arg, int unset) { int *opt = option->value; BUG_ON_OPT_NEG(unset); *opt |= PICKAXE_BLAME_MOVE; if (arg) blame_move_score = parse_score(arg); return 0; } static int is_a_rev(const char *name) { struct object_id oid; if (repo_get_oid(the_repository, name, &oid)) return 0; return OBJ_NONE < oid_object_info(the_repository, &oid, NULL); } static int peel_to_commit_oid(struct object_id *oid_ret, void *cbdata) { struct repository *r = ((struct blame_scoreboard *)cbdata)->repo; struct object_id oid; oidcpy(&oid, oid_ret); while (1) { struct object *obj; int kind = oid_object_info(r, &oid, NULL); if (kind == OBJ_COMMIT) { oidcpy(oid_ret, &oid); return 0; } if (kind != OBJ_TAG) return -1; obj = deref_tag(r, parse_object(r, &oid), NULL, 0); if (!obj) return -1; oidcpy(&oid, &obj->oid); } } static void build_ignorelist(struct blame_scoreboard *sb, struct string_list *ignore_revs_file_list, struct string_list *ignore_rev_list) { struct string_list_item *i; struct object_id oid; oidset_init(&sb->ignore_list, 0); for_each_string_list_item(i, ignore_revs_file_list) { if (!strcmp(i->string, "")) oidset_clear(&sb->ignore_list); else oidset_parse_file_carefully(&sb->ignore_list, i->string, the_repository->hash_algo, peel_to_commit_oid, sb); } for_each_string_list_item(i, ignore_rev_list) { if (repo_get_oid_committish(the_repository, i->string, &oid) || 
peel_to_commit_oid(&oid, sb)) die(_("cannot find revision %s to ignore"), i->string); oidset_insert(&sb->ignore_list, &oid); } } int cmd_blame(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct rev_info revs; char *path = NULL; struct blame_scoreboard sb; struct blame_origin *o; struct blame_entry *ent = NULL; long dashdash_pos, lno; struct progress_info pi = { NULL, 0 }; struct string_list range_list = STRING_LIST_INIT_NODUP; struct string_list ignore_rev_list = STRING_LIST_INIT_NODUP; int output_option = 0, opt = 0; int show_stats = 0; const char *revs_file = NULL; const char *contents_from = NULL; const struct option options[] = { OPT_BOOL(0, "incremental", &incremental, N_("show blame entries as we find them, incrementally")), OPT_BOOL('b', NULL, &blank_boundary, N_("do not show object names of boundary commits (Default: off)")), OPT_BOOL(0, "root", &show_root, N_("do not treat root commits as boundaries (Default: off)")), OPT_BOOL(0, "show-stats", &show_stats, N_("show work cost statistics")), OPT_BOOL(0, "progress", &show_progress, N_("force progress reporting")), OPT_BIT(0, "score-debug", &output_option, N_("show output score for blame entries"), OUTPUT_SHOW_SCORE), OPT_BIT('f', "show-name", &output_option, N_("show original filename (Default: auto)"), OUTPUT_SHOW_NAME), OPT_BIT('n', "show-number", &output_option, N_("show original linenumber (Default: off)"), OUTPUT_SHOW_NUMBER), OPT_BIT('p', "porcelain", &output_option, N_("show in a format designed for machine consumption"), OUTPUT_PORCELAIN), OPT_BIT(0, "line-porcelain", &output_option, N_("show porcelain format with per-line commit information"), OUTPUT_PORCELAIN|OUTPUT_LINE_PORCELAIN), OPT_BIT('c', NULL, &output_option, N_("use the same output mode as git-annotate (Default: off)"), OUTPUT_ANNOTATE_COMPAT), OPT_BIT('t', NULL, &output_option, N_("show raw timestamp (Default: off)"), OUTPUT_RAW_TIMESTAMP), OPT_BIT('l', NULL, &output_option, N_("show long commit SHA1 
(Default: off)"), OUTPUT_LONG_OBJECT_NAME), OPT_BIT('s', NULL, &output_option, N_("suppress author name and timestamp (Default: off)"), OUTPUT_NO_AUTHOR), OPT_BIT('e', "show-email", &output_option, N_("show author email instead of name (Default: off)"), OUTPUT_SHOW_EMAIL), OPT_BIT('w', NULL, &xdl_opts, N_("ignore whitespace differences"), XDF_IGNORE_WHITESPACE), OPT_STRING_LIST(0, "ignore-rev", &ignore_rev_list, N_("rev"), N_("ignore when blaming")), OPT_STRING_LIST(0, "ignore-revs-file", &ignore_revs_file_list, N_("file"), N_("ignore revisions from ")), OPT_BIT(0, "color-lines", &output_option, N_("color redundant metadata from previous line differently"), OUTPUT_COLOR_LINE), OPT_BIT(0, "color-by-age", &output_option, N_("color lines by age"), OUTPUT_SHOW_AGE_WITH_COLOR), OPT_BIT(0, "minimal", &xdl_opts, N_("spend extra cycles to find better match"), XDF_NEED_MINIMAL), OPT_STRING('S', NULL, &revs_file, N_("file"), N_("use revisions from instead of calling git-rev-list")), OPT_STRING(0, "contents", &contents_from, N_("file"), N_("use 's contents as the final image")), OPT_CALLBACK_F('C', NULL, &opt, N_("score"), N_("find line copies within and across files"), PARSE_OPT_OPTARG, blame_copy_callback), OPT_CALLBACK_F('M', NULL, &opt, N_("score"), N_("find line movements within and across files"), PARSE_OPT_OPTARG, blame_move_callback), OPT_STRING_LIST('L', NULL, &range_list, N_("range"), N_("process only line range , or function :")), OPT__ABBREV(&abbrev), OPT_END() }; struct parse_opt_ctx_t ctx; int cmd_is_annotate = !strcmp(argv[0], "annotate"); struct range_set ranges; unsigned int range_i; long anchor; long num_lines = 0; const char *str_usage = cmd_is_annotate ? annotate_usage : blame_usage; const char **opt_usage = cmd_is_annotate ? 
annotate_opt_usage : blame_opt_usage; setup_default_color_by_age(); git_config(git_blame_config, &output_option); repo_init_revisions(the_repository, &revs, NULL); revs.date_mode = blame_date_mode; revs.diffopt.flags.allow_textconv = 1; revs.diffopt.flags.follow_renames = 1; save_commit_buffer = 0; dashdash_pos = 0; show_progress = -1; parse_options_start(&ctx, argc, argv, prefix, options, PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0); for (;;) { switch (parse_options_step(&ctx, options, opt_usage)) { case PARSE_OPT_NON_OPTION: case PARSE_OPT_UNKNOWN: break; case PARSE_OPT_HELP: case PARSE_OPT_ERROR: case PARSE_OPT_SUBCOMMAND: exit(129); case PARSE_OPT_COMPLETE: exit(0); case PARSE_OPT_DONE: if (ctx.argv[0]) dashdash_pos = ctx.cpidx; goto parse_done; } if (!strcmp(ctx.argv[0], "--reverse")) { ctx.argv[0] = "--children"; reverse = 1; } parse_revision_opt(&revs, &ctx, options, opt_usage); } parse_done: revision_opts_finish(&revs); no_whole_file_rename = !revs.diffopt.flags.follow_renames; xdl_opts |= revs.diffopt.xdl_opts & XDF_INDENT_HEURISTIC; revs.diffopt.flags.follow_renames = 0; argc = parse_options_end(&ctx); prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; if (incremental || (output_option & OUTPUT_PORCELAIN)) { if (show_progress > 0) die(_("--progress can't be used with --incremental or porcelain formats")); show_progress = 0; } else if (show_progress < 0) show_progress = isatty(2); if (0 < abbrev && abbrev < (int)the_hash_algo->hexsz) /* one more abbrev length is needed for the boundary commit */ abbrev++; else if (!abbrev) abbrev = the_hash_algo->hexsz; if (revs_file && read_ancestry(revs_file)) die_errno("reading graft file '%s' failed", revs_file); if (cmd_is_annotate) { output_option |= OUTPUT_ANNOTATE_COMPAT; blame_date_mode.type = DATE_ISO8601; } else { blame_date_mode = revs.date_mode; } /* The maximum width used to show the dates */ switch (blame_date_mode.type) { case DATE_RFC2822: blame_date_width = 
sizeof("Thu, 19 Oct 2006 16:00:04 -0700"); break; case DATE_ISO8601_STRICT: blame_date_width = sizeof("2006-10-19T16:00:04-07:00"); break; case DATE_ISO8601: blame_date_width = sizeof("2006-10-19 16:00:04 -0700"); break; case DATE_RAW: blame_date_width = sizeof("1161298804 -0700"); break; case DATE_UNIX: blame_date_width = sizeof("1161298804"); break; case DATE_SHORT: blame_date_width = sizeof("2006-10-19"); break; case DATE_RELATIVE: /* * TRANSLATORS: This string is used to tell us the * maximum display width for a relative timestamp in * "git blame" output. For C locale, "4 years, 11 * months ago", which takes 22 places, is the longest * among various forms of relative timestamps, but * your language may need more or fewer display * columns. */ blame_date_width = utf8_strwidth(_("4 years, 11 months ago")) + 1; /* add the null */ break; case DATE_HUMAN: /* If the year is shown, no time is shown */ blame_date_width = sizeof("Thu Oct 19 16:00"); break; case DATE_NORMAL: blame_date_width = sizeof("Thu Oct 19 16:00:04 2006 -0700"); break; case DATE_STRFTIME: blame_date_width = strlen(show_date(0, 0, blame_date_mode)) + 1; /* add the null */ break; } blame_date_width -= 1; /* strip the null */ if (revs.diffopt.flags.find_copies_harder) opt |= (PICKAXE_BLAME_COPY | PICKAXE_BLAME_MOVE | PICKAXE_BLAME_COPY_HARDER); /* * We have collected options unknown to us in argv[1..unk] * which are to be passed to revision machinery if we are * going to do the "bottom" processing. * * The remaining are: * * (1) if dashdash_pos != 0, it is either * "blame [revisions] -- " or * "blame -- " * * (2) otherwise, it is one of the two: * "blame [revisions] " * "blame " * * Note that we must strip out from the arguments: we do not * want the path pruning but we may want "bottom" processing. 
*/ if (dashdash_pos) { switch (argc - dashdash_pos - 1) { case 2: /* (1b) */ if (argc != 4) usage_with_options(opt_usage, options); /* reorder for the new way: -- */ argv[1] = argv[3]; argv[3] = argv[2]; argv[2] = "--"; /* FALLTHROUGH */ case 1: /* (1a) */ path = add_prefix(prefix, argv[--argc]); argv[argc] = NULL; break; default: usage_with_options(opt_usage, options); } } else { if (argc < 2) usage_with_options(opt_usage, options); if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */ path = add_prefix(prefix, argv[1]); argv[1] = argv[2]; } else { /* (2a) */ if (argc == 2 && is_a_rev(argv[1]) && !repo_get_work_tree(the_repository)) die("missing to blame"); path = add_prefix(prefix, argv[argc - 1]); } argv[argc - 1] = "--"; } revs.disable_stdin = 1; setup_revisions(argc, argv, &revs, NULL); if (!revs.pending.nr && is_bare_repository()) { struct commit *head_commit; struct object_id head_oid; if (!refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", RESOLVE_REF_READING, &head_oid, NULL) || !(head_commit = lookup_commit_reference_gently(revs.repo, &head_oid, 1))) die("no such ref: HEAD"); add_pending_object(&revs, &head_commit->object, "HEAD"); } init_scoreboard(&sb); sb.revs = &revs; sb.contents_from = contents_from; sb.reverse = reverse; sb.repo = the_repository; sb.path = path; build_ignorelist(&sb, &ignore_revs_file_list, &ignore_rev_list); string_list_clear(&ignore_revs_file_list, 0); string_list_clear(&ignore_rev_list, 0); setup_scoreboard(&sb, &o); /* * Changed-path Bloom filters are disabled when looking * for copies. 
*/ if (!(opt & PICKAXE_BLAME_COPY)) setup_blame_bloom_data(&sb); lno = sb.num_lines; if (lno && !range_list.nr) string_list_append(&range_list, "1"); anchor = 1; range_set_init(&ranges, range_list.nr); for (range_i = 0; range_i < range_list.nr; ++range_i) { long bottom, top; if (parse_range_arg(range_list.items[range_i].string, nth_line_cb, &sb, lno, anchor, &bottom, &top, sb.path, the_repository->index)) usage(str_usage); if ((!lno && (top || bottom)) || lno < bottom) die(Q_("file %s has only %lu line", "file %s has only %lu lines", lno), sb.path, lno); if (bottom < 1) bottom = 1; if (top < 1 || lno < top) top = lno; bottom--; range_set_append_unsafe(&ranges, bottom, top); anchor = top + 1; } sort_and_merge_range_set(&ranges); for (range_i = ranges.nr; range_i > 0; --range_i) { const struct range *r = &ranges.ranges[range_i - 1]; ent = blame_entry_prepend(ent, r->start, r->end, o); num_lines += (r->end - r->start); } if (!num_lines) num_lines = sb.num_lines; o->suspects = ent; prio_queue_put(&sb.commits, o->commit); blame_origin_decref(o); range_set_release(&ranges); string_list_clear(&range_list, 0); sb.ent = NULL; if (blame_move_score) sb.move_score = blame_move_score; if (blame_copy_score) sb.copy_score = blame_copy_score; sb.debug = DEBUG_BLAME; sb.on_sanity_fail = &sanity_check_on_fail; sb.show_root = show_root; sb.xdl_opts = xdl_opts; sb.no_whole_file_rename = no_whole_file_rename; read_mailmap(&mailmap); sb.found_guilty_entry = &found_guilty_entry; sb.found_guilty_entry_data = π if (show_progress) pi.progress = start_delayed_progress(_("Blaming lines"), num_lines); assign_blame(&sb, opt); stop_progress(&pi.progress); if (!incremental) setup_pager(); else goto cleanup; blame_sort_final(&sb); blame_coalesce(&sb); if (!(output_option & (OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR))) output_option |= coloring_mode; if (!(output_option & OUTPUT_PORCELAIN)) { find_alignment(&sb, &output_option); if (!*repeated_meta_color && (output_option & OUTPUT_COLOR_LINE)) 
xsnprintf(repeated_meta_color, sizeof(repeated_meta_color), "%s", GIT_COLOR_CYAN); } if (output_option & OUTPUT_ANNOTATE_COMPAT) output_option &= ~(OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR); output(&sb, output_option); if (show_stats) { printf("num read blob: %d\n", sb.num_read_blob); printf("num get patch: %d\n", sb.num_get_patch); printf("num commits: %d\n", sb.num_commits); } cleanup: for (ent = sb.ent; ent; ) { struct blame_entry *e = ent->next; free(ent); ent = e; } free(path); cleanup_scoreboard(&sb); release_revisions(&revs); return 0; } git-cinnabar-0.7.0/git-core/builtin/branch.c000064400000000000000000000767051046102023000167650ustar 00000000000000/* * Builtin "git branch" * * Copyright (c) 2006 Kristian Høgsberg * Based on git-branch.sh by Junio C Hamano. */ #define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "config.h" #include "color.h" #include "editor.h" #include "environment.h" #include "refs.h" #include "commit.h" #include "gettext.h" #include "object-name.h" #include "remote.h" #include "parse-options.h" #include "branch.h" #include "path.h" #include "string-list.h" #include "column.h" #include "utf8.h" #include "ref-filter.h" #include "worktree.h" #include "help.h" #include "advice.h" #include "commit-reach.h" static const char * const builtin_branch_usage[] = { N_("git branch [] [-r | -a] [--merged] [--no-merged]"), N_("git branch [] [-f] [--recurse-submodules] []"), N_("git branch [] [-l] [...]"), N_("git branch [] [-r] (-d | -D) ..."), N_("git branch [] (-m | -M) [] "), N_("git branch [] (-c | -C) [] "), N_("git branch [] [-r | -a] [--points-at]"), N_("git branch [] [-r | -a] [--format]"), NULL }; static const char *head; static struct object_id head_oid; static int recurse_submodules = 0; static int submodule_propagate_branches = 0; static int branch_use_color = -1; static char branch_colors[][COLOR_MAXLEN] = { GIT_COLOR_RESET, GIT_COLOR_NORMAL, /* PLAIN */ GIT_COLOR_RED, /* REMOTE */ GIT_COLOR_NORMAL, /* LOCAL */ 
GIT_COLOR_GREEN, /* CURRENT */ GIT_COLOR_BLUE, /* UPSTREAM */ GIT_COLOR_CYAN, /* WORKTREE */ }; enum color_branch { BRANCH_COLOR_RESET = 0, BRANCH_COLOR_PLAIN = 1, BRANCH_COLOR_REMOTE = 2, BRANCH_COLOR_LOCAL = 3, BRANCH_COLOR_CURRENT = 4, BRANCH_COLOR_UPSTREAM = 5, BRANCH_COLOR_WORKTREE = 6 }; static const char *color_branch_slots[] = { [BRANCH_COLOR_RESET] = "reset", [BRANCH_COLOR_PLAIN] = "plain", [BRANCH_COLOR_REMOTE] = "remote", [BRANCH_COLOR_LOCAL] = "local", [BRANCH_COLOR_CURRENT] = "current", [BRANCH_COLOR_UPSTREAM] = "upstream", [BRANCH_COLOR_WORKTREE] = "worktree", }; static struct string_list output = STRING_LIST_INIT_DUP; static unsigned int colopts; define_list_config_array(color_branch_slots); static int git_branch_config(const char *var, const char *value, const struct config_context *ctx, void *cb) { const char *slot_name; if (!strcmp(var, "branch.sort")) { if (!value) return config_error_nonbool(var); string_list_append(cb, value); return 0; } if (starts_with(var, "column.")) return git_column_config(var, value, "branch", &colopts); if (!strcmp(var, "color.branch")) { branch_use_color = git_config_colorbool(var, value); return 0; } if (skip_prefix(var, "color.branch.", &slot_name)) { int slot = LOOKUP_CONFIG(color_branch_slots, slot_name); if (slot < 0) return 0; if (!value) return config_error_nonbool(var); return color_parse(value, branch_colors[slot]); } if (!strcmp(var, "submodule.recurse")) { recurse_submodules = git_config_bool(var, value); return 0; } if (!strcasecmp(var, "submodule.propagateBranches")) { submodule_propagate_branches = git_config_bool(var, value); return 0; } if (git_color_config(var, value, cb) < 0) return -1; return git_default_config(var, value, ctx, cb); } static const char *branch_get_color(enum color_branch ix) { if (want_color(branch_use_color)) return branch_colors[ix]; return ""; } static int branch_merged(int kind, const char *name, struct commit *rev, struct commit *head_rev) { /* * This checks whether the merge 
bases of branch and HEAD (or * the other branch this branch builds upon) contains the * branch, which means that the branch has already been merged * safely to HEAD (or the other branch). */ struct commit *reference_rev = NULL; const char *reference_name = NULL; void *reference_name_to_free = NULL; int merged; if (kind == FILTER_REFS_BRANCHES) { struct branch *branch = branch_get(name); const char *upstream = branch_get_upstream(branch, NULL); struct object_id oid; if (upstream && (reference_name = reference_name_to_free = refs_resolve_refdup(get_main_ref_store(the_repository), upstream, RESOLVE_REF_READING, &oid, NULL)) != NULL) reference_rev = lookup_commit_reference(the_repository, &oid); } if (!reference_rev) reference_rev = head_rev; merged = reference_rev ? repo_in_merge_bases(the_repository, rev, reference_rev) : 0; if (merged < 0) exit(128); /* * After the safety valve is fully redefined to "check with * upstream, if any, otherwise with HEAD", we should just * return the result of the repo_in_merge_bases() above without * any of the following code, but during the transition period, * a gentle reminder is in order. */ if (head_rev != reference_rev) { int expect = head_rev ? 
repo_in_merge_bases(the_repository, rev, head_rev) : 0; if (expect < 0) exit(128); if (expect == merged) ; /* okay */ else if (merged) warning(_("deleting branch '%s' that has been merged to\n" " '%s', but not yet merged to HEAD"), name, reference_name); else warning(_("not deleting branch '%s' that is not yet merged to\n" " '%s', even though it is merged to HEAD"), name, reference_name); } free(reference_name_to_free); return merged; } static int check_branch_commit(const char *branchname, const char *refname, const struct object_id *oid, struct commit *head_rev, int kinds, int force) { struct commit *rev = lookup_commit_reference(the_repository, oid); if (!force && !rev) { error(_("couldn't look up commit object for '%s'"), refname); return -1; } if (!force && !branch_merged(kinds, branchname, rev, head_rev)) { error(_("the branch '%s' is not fully merged"), branchname); advise_if_enabled(ADVICE_FORCE_DELETE_BRANCH, _("If you are sure you want to delete it, " "run 'git branch -D %s'"), branchname); return -1; } return 0; } static void delete_branch_config(const char *branchname) { struct strbuf buf = STRBUF_INIT; strbuf_addf(&buf, "branch.%s", branchname); if (repo_config_rename_section(the_repository, buf.buf, NULL) < 0) warning(_("update of config-file failed")); strbuf_release(&buf); } static int delete_branches(int argc, const char **argv, int force, int kinds, int quiet) { struct commit *head_rev = NULL; struct object_id oid; char *name = NULL; const char *fmt; int i; int ret = 0; int remote_branch = 0; struct strbuf bname = STRBUF_INIT; unsigned allowed_interpret; struct string_list refs_to_delete = STRING_LIST_INIT_DUP; struct string_list_item *item; int branch_name_pos; const char *fmt_remotes = "refs/remotes/%s"; switch (kinds) { case FILTER_REFS_REMOTES: fmt = fmt_remotes; /* For subsequent UI messages */ remote_branch = 1; allowed_interpret = INTERPRET_BRANCH_REMOTE; force = 1; break; case FILTER_REFS_BRANCHES: fmt = "refs/heads/%s"; allowed_interpret 
= INTERPRET_BRANCH_LOCAL; break; default: die(_("cannot use -a with -d")); } branch_name_pos = strcspn(fmt, "%"); if (!force) head_rev = lookup_commit_reference(the_repository, &head_oid); for (i = 0; i < argc; i++, strbuf_reset(&bname)) { char *target = NULL; int flags = 0; copy_branchname(&bname, argv[i], allowed_interpret); free(name); name = mkpathdup(fmt, bname.buf); if (kinds == FILTER_REFS_BRANCHES) { const char *path; if ((path = branch_checked_out(name))) { error(_("cannot delete branch '%s' " "used by worktree at '%s'"), bname.buf, path); ret = 1; continue; } } target = refs_resolve_refdup(get_main_ref_store(the_repository), name, RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE | RESOLVE_REF_ALLOW_BAD_NAME, &oid, &flags); if (!target) { if (remote_branch) { error(_("remote-tracking branch '%s' not found"), bname.buf); } else { char *virtual_name = mkpathdup(fmt_remotes, bname.buf); char *virtual_target = refs_resolve_refdup(get_main_ref_store(the_repository), virtual_name, RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE | RESOLVE_REF_ALLOW_BAD_NAME, &oid, &flags); FREE_AND_NULL(virtual_name); if (virtual_target) error(_("branch '%s' not found.\n" "Did you forget --remote?"), bname.buf); else error(_("branch '%s' not found"), bname.buf); FREE_AND_NULL(virtual_target); } ret = 1; continue; } if (!(flags & (REF_ISSYMREF|REF_ISBROKEN)) && check_branch_commit(bname.buf, name, &oid, head_rev, kinds, force)) { ret = 1; goto next; } item = string_list_append(&refs_to_delete, name); item->util = xstrdup((flags & REF_ISBROKEN) ? "broken" : (flags & REF_ISSYMREF) ? 
target : repo_find_unique_abbrev(the_repository, &oid, DEFAULT_ABBREV)); next: free(target); } if (refs_delete_refs(get_main_ref_store(the_repository), NULL, &refs_to_delete, REF_NO_DEREF)) ret = 1; for_each_string_list_item(item, &refs_to_delete) { char *describe_ref = item->util; char *name = item->string; if (!refs_ref_exists(get_main_ref_store(the_repository), name)) { char *refname = name + branch_name_pos; if (!quiet) printf(remote_branch ? _("Deleted remote-tracking branch %s (was %s).\n") : _("Deleted branch %s (was %s).\n"), name + branch_name_pos, describe_ref); delete_branch_config(refname); } free(describe_ref); } string_list_clear(&refs_to_delete, 0); free(name); strbuf_release(&bname); return ret; } static int calc_maxwidth(struct ref_array *refs, int remote_bonus) { int i, max = 0; for (i = 0; i < refs->nr; i++) { struct ref_array_item *it = refs->items[i]; const char *desc = it->refname; int w; skip_prefix(it->refname, "refs/heads/", &desc); skip_prefix(it->refname, "refs/remotes/", &desc); if (it->kind == FILTER_REFS_DETACHED_HEAD) { char *head_desc = get_head_description(); w = utf8_strwidth(head_desc); free(head_desc); } else w = utf8_strwidth(desc); if (it->kind == FILTER_REFS_REMOTES) w += remote_bonus; if (w > max) max = w; } return max; } static const char *quote_literal_for_format(const char *s) { static struct strbuf buf = STRBUF_INIT; strbuf_reset(&buf); while (strbuf_expand_step(&buf, &s)) strbuf_addstr(&buf, "%%"); return buf.buf; } static char *build_format(struct ref_filter *filter, int maxwidth, const char *remote_prefix) { struct strbuf fmt = STRBUF_INIT; struct strbuf local = STRBUF_INIT; struct strbuf remote = STRBUF_INIT; strbuf_addf(&local, "%%(if)%%(HEAD)%%(then)* %s%%(else)%%(if)%%(worktreepath)%%(then)+ %s%%(else) %s%%(end)%%(end)", branch_get_color(BRANCH_COLOR_CURRENT), branch_get_color(BRANCH_COLOR_WORKTREE), branch_get_color(BRANCH_COLOR_LOCAL)); strbuf_addf(&remote, " %s", branch_get_color(BRANCH_COLOR_REMOTE)); if 
(filter->verbose) { struct strbuf obname = STRBUF_INIT; if (filter->abbrev < 0) strbuf_addf(&obname, "%%(objectname:short)"); else if (!filter->abbrev) strbuf_addf(&obname, "%%(objectname)"); else strbuf_addf(&obname, "%%(objectname:short=%d)", filter->abbrev); strbuf_addf(&local, "%%(align:%d,left)%%(refname:lstrip=2)%%(end)", maxwidth); strbuf_addstr(&local, branch_get_color(BRANCH_COLOR_RESET)); strbuf_addf(&local, " %s ", obname.buf); if (filter->verbose > 1) { strbuf_addf(&local, "%%(if:notequals=*)%%(HEAD)%%(then)%%(if)%%(worktreepath)%%(then)(%s%%(worktreepath)%s) %%(end)%%(end)", branch_get_color(BRANCH_COLOR_WORKTREE), branch_get_color(BRANCH_COLOR_RESET)); strbuf_addf(&local, "%%(if)%%(upstream)%%(then)[%s%%(upstream:short)%s%%(if)%%(upstream:track)" "%%(then): %%(upstream:track,nobracket)%%(end)] %%(end)%%(contents:subject)", branch_get_color(BRANCH_COLOR_UPSTREAM), branch_get_color(BRANCH_COLOR_RESET)); } else strbuf_addf(&local, "%%(if)%%(upstream:track)%%(then)%%(upstream:track) %%(end)%%(contents:subject)"); strbuf_addf(&remote, "%%(align:%d,left)%s%%(refname:lstrip=2)%%(end)%s" "%%(if)%%(symref)%%(then) -> %%(symref:short)" "%%(else) %s %%(contents:subject)%%(end)", maxwidth, quote_literal_for_format(remote_prefix), branch_get_color(BRANCH_COLOR_RESET), obname.buf); strbuf_release(&obname); } else { strbuf_addf(&local, "%%(refname:lstrip=2)%s%%(if)%%(symref)%%(then) -> %%(symref:short)%%(end)", branch_get_color(BRANCH_COLOR_RESET)); strbuf_addf(&remote, "%s%%(refname:lstrip=2)%s%%(if)%%(symref)%%(then) -> %%(symref:short)%%(end)", quote_literal_for_format(remote_prefix), branch_get_color(BRANCH_COLOR_RESET)); } strbuf_addf(&fmt, "%%(if:notequals=refs/remotes)%%(refname:rstrip=-2)%%(then)%s%%(else)%s%%(end)", local.buf, remote.buf); strbuf_release(&local); strbuf_release(&remote); return strbuf_detach(&fmt, NULL); } static void print_ref_list(struct ref_filter *filter, struct ref_sorting *sorting, struct ref_format *format, struct string_list 
*output) { int i; struct ref_array array; int maxwidth = 0; const char *remote_prefix = ""; char *to_free = NULL; /* * If we are listing more than just remote branches, * then remote branches will have a "remotes/" prefix. * We need to account for this in the width. */ if (filter->kind != FILTER_REFS_REMOTES) remote_prefix = "remotes/"; memset(&array, 0, sizeof(array)); filter_refs(&array, filter, filter->kind); if (filter->verbose) maxwidth = calc_maxwidth(&array, strlen(remote_prefix)); if (!format->format) format->format = to_free = build_format(filter, maxwidth, remote_prefix); format->use_color = branch_use_color; if (verify_ref_format(format)) die(_("unable to parse format string")); filter_ahead_behind(the_repository, &array); ref_array_sort(sorting, &array); if (column_active(colopts)) { struct strbuf out = STRBUF_INIT, err = STRBUF_INIT; assert(!filter->verbose && "--column and --verbose are incompatible"); for (i = 0; i < array.nr; i++) { strbuf_reset(&err); strbuf_reset(&out); if (format_ref_array_item(array.items[i], format, &out, &err)) die("%s", err.buf); /* format to a string_list to let print_columns() do its job */ string_list_append(output, out.buf); } strbuf_release(&err); strbuf_release(&out); } else { print_formatted_ref_array(&array, format); } ref_array_clear(&array); free(to_free); } static void print_current_branch_name(void) { int flags; const char *refname = refs_resolve_ref_unsafe(get_main_ref_store(the_repository), "HEAD", 0, NULL, &flags); const char *shortname; if (!refname) die(_("could not resolve HEAD")); else if (!(flags & REF_ISSYMREF)) return; else if (skip_prefix(refname, "refs/heads/", &shortname)) puts(shortname); else die(_("HEAD (%s) points outside of refs/heads/"), refname); } static void reject_rebase_or_bisect_branch(struct worktree **worktrees, const char *target) { int i; for (i = 0; worktrees[i]; i++) { struct worktree *wt = worktrees[i]; if (!wt->is_detached) continue; if (is_worktree_being_rebased(wt, target)) 
die(_("branch %s is being rebased at %s"), target, wt->path); if (is_worktree_being_bisected(wt, target)) die(_("branch %s is being bisected at %s"), target, wt->path); } } /* * Update all per-worktree HEADs pointing at the old ref to point the new ref. * This will be used when renaming a branch. Returns 0 if successful, non-zero * otherwise. */ static int replace_each_worktree_head_symref(struct worktree **worktrees, const char *oldref, const char *newref, const char *logmsg) { int ret = 0; int i; for (i = 0; worktrees[i]; i++) { struct ref_store *refs; if (worktrees[i]->is_detached) continue; if (!worktrees[i]->head_ref) continue; if (strcmp(oldref, worktrees[i]->head_ref)) continue; refs = get_worktree_ref_store(worktrees[i]); if (refs_update_symref(refs, "HEAD", newref, logmsg)) ret = error(_("HEAD of working tree %s is not updated"), worktrees[i]->path); } return ret; } #define IS_HEAD 1 #define IS_ORPHAN 2 static void copy_or_rename_branch(const char *oldname, const char *newname, int copy, int force) { struct strbuf oldref = STRBUF_INIT, newref = STRBUF_INIT, logmsg = STRBUF_INIT; struct strbuf oldsection = STRBUF_INIT, newsection = STRBUF_INIT; const char *interpreted_oldname = NULL; const char *interpreted_newname = NULL; int recovery = 0, oldref_usage = 0; struct worktree **worktrees = get_worktrees(); if (check_branch_ref(&oldref, oldname)) { /* * Bad name --- this could be an attempt to rename a * ref that we used to allow to be created by accident. 
*/ if (refs_ref_exists(get_main_ref_store(the_repository), oldref.buf)) recovery = 1; else { int code = die_message(_("invalid branch name: '%s'"), oldname); advise_if_enabled(ADVICE_REF_SYNTAX, _("See `man git check-ref-format`")); exit(code); } } for (int i = 0; worktrees[i]; i++) { struct worktree *wt = worktrees[i]; if (wt->head_ref && !strcmp(oldref.buf, wt->head_ref)) { oldref_usage |= IS_HEAD; if (is_null_oid(&wt->head_oid)) oldref_usage |= IS_ORPHAN; break; } } if ((copy || !(oldref_usage & IS_HEAD)) && !refs_ref_exists(get_main_ref_store(the_repository), oldref.buf)) { if (oldref_usage & IS_HEAD) die(_("no commit on branch '%s' yet"), oldname); else die(_("no branch named '%s'"), oldname); } /* * A command like "git branch -M currentbranch currentbranch" cannot * cause the worktree to become inconsistent with HEAD, so allow it. */ if (!strcmp(oldname, newname)) validate_branchname(newname, &newref); else validate_new_branchname(newname, &newref, force); reject_rebase_or_bisect_branch(worktrees, oldref.buf); if (!skip_prefix(oldref.buf, "refs/heads/", &interpreted_oldname) || !skip_prefix(newref.buf, "refs/heads/", &interpreted_newname)) { BUG("expected prefix missing for refs"); } if (copy) strbuf_addf(&logmsg, "Branch: copied %s to %s", oldref.buf, newref.buf); else strbuf_addf(&logmsg, "Branch: renamed %s to %s", oldref.buf, newref.buf); if (!copy && !(oldref_usage & IS_ORPHAN) && refs_rename_ref(get_main_ref_store(the_repository), oldref.buf, newref.buf, logmsg.buf)) die(_("branch rename failed")); if (copy && refs_copy_existing_ref(get_main_ref_store(the_repository), oldref.buf, newref.buf, logmsg.buf)) die(_("branch copy failed")); if (recovery) { if (copy) warning(_("created a copy of a misnamed branch '%s'"), interpreted_oldname); else warning(_("renamed a misnamed branch '%s' away"), interpreted_oldname); } if (!copy && (oldref_usage & IS_HEAD) && replace_each_worktree_head_symref(worktrees, oldref.buf, newref.buf, logmsg.buf)) die(_("branch 
renamed to %s, but HEAD is not updated"), newname); strbuf_release(&logmsg); strbuf_addf(&oldsection, "branch.%s", interpreted_oldname); strbuf_addf(&newsection, "branch.%s", interpreted_newname); if (!copy && repo_config_rename_section(the_repository, oldsection.buf, newsection.buf) < 0) die(_("branch is renamed, but update of config-file failed")); if (copy && strcmp(interpreted_oldname, interpreted_newname) && repo_config_copy_section(the_repository, oldsection.buf, newsection.buf) < 0) die(_("branch is copied, but update of config-file failed")); strbuf_release(&oldref); strbuf_release(&newref); strbuf_release(&oldsection); strbuf_release(&newsection); free_worktrees(worktrees); } static GIT_PATH_FUNC(edit_description, "EDIT_DESCRIPTION") static int edit_branch_description(const char *branch_name) { int exists; struct strbuf buf = STRBUF_INIT; struct strbuf name = STRBUF_INIT; exists = !read_branch_desc(&buf, branch_name); if (!buf.len || buf.buf[buf.len-1] != '\n') strbuf_addch(&buf, '\n'); strbuf_commented_addf(&buf, comment_line_str, _("Please edit the description for the branch\n" " %s\n" "Lines starting with '%s' will be stripped.\n"), branch_name, comment_line_str); write_file_buf(edit_description(), buf.buf, buf.len); strbuf_reset(&buf); if (launch_editor(edit_description(), &buf, NULL)) { strbuf_release(&buf); return -1; } strbuf_stripspace(&buf, comment_line_str); strbuf_addf(&name, "branch.%s.description", branch_name); if (buf.len || exists) git_config_set(name.buf, buf.len ? 
buf.buf : NULL); strbuf_release(&name); strbuf_release(&buf); return 0; } int cmd_branch(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { /* possible actions */ int delete = 0, rename = 0, copy = 0, list = 0, unset_upstream = 0, show_current = 0, edit_description = 0; const char *new_upstream = NULL; int noncreate_actions = 0; /* possible options */ int reflog = 0, quiet = 0, icase = 0, force = 0, recurse_submodules_explicit = 0; enum branch_track track; struct ref_filter filter = REF_FILTER_INIT; static struct ref_sorting *sorting; struct string_list sorting_options = STRING_LIST_INIT_DUP; struct ref_format format = REF_FORMAT_INIT; int ret; struct option options[] = { OPT_GROUP(N_("Generic options")), OPT__VERBOSE(&filter.verbose, N_("show hash and subject, give twice for upstream branch")), OPT__QUIET(&quiet, N_("suppress informational messages")), OPT_CALLBACK_F('t', "track", &track, "(direct|inherit)", N_("set branch tracking configuration"), PARSE_OPT_OPTARG, parse_opt_tracking_mode), OPT_SET_INT_F(0, "set-upstream", &track, N_("do not use"), BRANCH_TRACK_OVERRIDE, PARSE_OPT_HIDDEN), OPT_STRING('u', "set-upstream-to", &new_upstream, N_("upstream"), N_("change the upstream info")), OPT_BOOL(0, "unset-upstream", &unset_upstream, N_("unset the upstream info")), OPT__COLOR(&branch_use_color, N_("use colored output")), OPT_SET_INT_F('r', "remotes", &filter.kind, N_("act on remote-tracking branches"), FILTER_REFS_REMOTES, PARSE_OPT_NONEG), OPT_CONTAINS(&filter.with_commit, N_("print only branches that contain the commit")), OPT_NO_CONTAINS(&filter.no_commit, N_("print only branches that don't contain the commit")), OPT_WITH(&filter.with_commit, N_("print only branches that contain the commit")), OPT_WITHOUT(&filter.no_commit, N_("print only branches that don't contain the commit")), OPT__ABBREV(&filter.abbrev), OPT_GROUP(N_("Specific git-branch actions:")), OPT_SET_INT_F('a', "all", &filter.kind, N_("list both remote-tracking and 
local branches"), FILTER_REFS_REMOTES | FILTER_REFS_BRANCHES, PARSE_OPT_NONEG), OPT_BIT('d', "delete", &delete, N_("delete fully merged branch"), 1), OPT_BIT('D', NULL, &delete, N_("delete branch (even if not merged)"), 2), OPT_BIT('m', "move", &rename, N_("move/rename a branch and its reflog"), 1), OPT_BIT('M', NULL, &rename, N_("move/rename a branch, even if target exists"), 2), OPT_BOOL(0, "omit-empty", &format.array_opts.omit_empty, N_("do not output a newline after empty formatted refs")), OPT_BIT('c', "copy", ©, N_("copy a branch and its reflog"), 1), OPT_BIT('C', NULL, ©, N_("copy a branch, even if target exists"), 2), OPT_BOOL('l', "list", &list, N_("list branch names")), OPT_BOOL(0, "show-current", &show_current, N_("show current branch name")), OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")), OPT_BOOL(0, "edit-description", &edit_description, N_("edit the description for the branch")), OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE), OPT_MERGED(&filter, N_("print only branches that are merged")), OPT_NO_MERGED(&filter, N_("print only branches that are not merged")), OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")), OPT_REF_SORT(&sorting_options), OPT_CALLBACK(0, "points-at", &filter.points_at, N_("object"), N_("print only branches of the object"), parse_opt_object_name), OPT_BOOL('i', "ignore-case", &icase, N_("sorting and filtering are case insensitive")), OPT_BOOL(0, "recurse-submodules", &recurse_submodules_explicit, N_("recurse through submodules")), OPT_STRING( 0 , "format", &format.format, N_("format"), N_("format to use for the output")), OPT_END(), }; setup_ref_filter_porcelain_msg(); filter.kind = FILTER_REFS_BRANCHES; filter.abbrev = -1; if (argc == 2 && !strcmp(argv[1], "-h")) usage_with_options(builtin_branch_usage, options); /* * Try to set sort keys from config. If config does not set any, * fall back on default (refname) sorting. 
*/ git_config(git_branch_config, &sorting_options); if (!sorting_options.nr) string_list_append(&sorting_options, "refname"); track = git_branch_track; head = refs_resolve_refdup(get_main_ref_store(the_repository), "HEAD", 0, &head_oid, NULL); if (!head) die(_("failed to resolve HEAD as a valid ref")); if (!strcmp(head, "HEAD")) filter.detached = 1; else if (!skip_prefix(head, "refs/heads/", &head)) die(_("HEAD not found below refs/heads!")); argc = parse_options(argc, argv, prefix, options, builtin_branch_usage, 0); if (!delete && !rename && !copy && !edit_description && !new_upstream && !show_current && !unset_upstream && argc == 0) list = 1; if (filter.with_commit || filter.no_commit || filter.reachable_from || filter.unreachable_from || filter.points_at.nr) list = 1; noncreate_actions = !!delete + !!rename + !!copy + !!new_upstream + !!show_current + !!list + !!edit_description + !!unset_upstream; if (noncreate_actions > 1) usage_with_options(builtin_branch_usage, options); if (recurse_submodules_explicit) { if (!submodule_propagate_branches) die(_("branch with --recurse-submodules can only be used if submodule.propagateBranches is enabled")); if (noncreate_actions) die(_("--recurse-submodules can only be used to create branches")); } recurse_submodules = (recurse_submodules || recurse_submodules_explicit) && submodule_propagate_branches; if (filter.abbrev == -1) filter.abbrev = DEFAULT_ABBREV; filter.ignore_case = icase; finalize_colopts(&colopts, -1); if (filter.verbose) { if (explicitly_enable_column(colopts)) die(_("options '%s' and '%s' cannot be used together"), "--column", "--verbose"); colopts = 0; } if (force) { delete *= 2; rename *= 2; copy *= 2; } if (list) setup_auto_pager("branch", 1); if (delete) { if (!argc) die(_("branch name required")); ret = delete_branches(argc, argv, delete > 1, filter.kind, quiet); goto out; } else if (show_current) { print_current_branch_name(); ret = 0; goto out; } else if (list) { /* git branch --list also shows HEAD 
when it is detached */ if ((filter.kind & FILTER_REFS_BRANCHES) && filter.detached) filter.kind |= FILTER_REFS_DETACHED_HEAD; filter.name_patterns = argv; /* * If no sorting parameter is given then we default to sorting * by 'refname'. This would give us an alphabetically sorted * array with the 'HEAD' ref at the beginning followed by * local branches 'refs/heads/...' and finally remote-tracking * branches 'refs/remotes/...'. */ sorting = ref_sorting_options(&sorting_options); ref_sorting_set_sort_flags_all(sorting, REF_SORTING_ICASE, icase); ref_sorting_set_sort_flags_all( sorting, REF_SORTING_DETACHED_HEAD_FIRST, 1); print_ref_list(&filter, sorting, &format, &output); print_columns(&output, colopts, NULL); string_list_clear(&output, 0); ref_sorting_release(sorting); ref_filter_clear(&filter); ret = 0; goto out; } else if (edit_description) { const char *branch_name; struct strbuf branch_ref = STRBUF_INIT; struct strbuf buf = STRBUF_INIT; if (!argc) { if (filter.detached) die(_("cannot give description to detached HEAD")); branch_name = head; } else if (argc == 1) { copy_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL); branch_name = buf.buf; } else { die(_("cannot edit description of more than one branch")); } strbuf_addf(&branch_ref, "refs/heads/%s", branch_name); if (!refs_ref_exists(get_main_ref_store(the_repository), branch_ref.buf)) { error((!argc || branch_checked_out(branch_ref.buf)) ? _("no commit on branch '%s' yet") : _("no branch named '%s'"), branch_name); ret = 1; } else if (!edit_branch_description(branch_name)) { ret = 0; /* happy */ } else { ret = 1; } strbuf_release(&branch_ref); strbuf_release(&buf); goto out; } else if (copy || rename) { if (!argc) die(_("branch name required")); else if ((argc == 1) && filter.detached) die(copy? 
_("cannot copy the current branch while not on any") : _("cannot rename the current branch while not on any")); else if (argc == 1) copy_or_rename_branch(head, argv[0], copy, copy + rename > 1); else if (argc == 2) copy_or_rename_branch(argv[0], argv[1], copy, copy + rename > 1); else die(copy? _("too many branches for a copy operation") : _("too many arguments for a rename operation")); } else if (new_upstream) { struct branch *branch; struct strbuf buf = STRBUF_INIT; if (!argc) branch = branch_get(NULL); else if (argc == 1) { copy_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL); branch = branch_get(buf.buf); } else die(_("too many arguments to set new upstream")); if (!branch) { if (!argc || !strcmp(argv[0], "HEAD")) die(_("could not set upstream of HEAD to %s when " "it does not point to any branch"), new_upstream); die(_("no such branch '%s'"), argv[0]); } if (!refs_ref_exists(get_main_ref_store(the_repository), branch->refname)) { if (!argc || branch_checked_out(branch->refname)) die(_("no commit on branch '%s' yet"), branch->name); die(_("branch '%s' does not exist"), branch->name); } dwim_and_setup_tracking(the_repository, branch->name, new_upstream, BRANCH_TRACK_OVERRIDE, quiet); strbuf_release(&buf); } else if (unset_upstream) { struct branch *branch; struct strbuf buf = STRBUF_INIT; if (!argc) branch = branch_get(NULL); else if (argc == 1) { copy_branchname(&buf, argv[0], INTERPRET_BRANCH_LOCAL); branch = branch_get(buf.buf); } else die(_("too many arguments to unset upstream")); if (!branch) { if (!argc || !strcmp(argv[0], "HEAD")) die(_("could not unset upstream of HEAD when " "it does not point to any branch")); die(_("no such branch '%s'"), argv[0]); } if (!branch_has_merge_config(branch)) die(_("branch '%s' has no upstream information"), branch->name); strbuf_reset(&buf); strbuf_addf(&buf, "branch.%s.remote", branch->name); git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE); strbuf_reset(&buf); strbuf_addf(&buf, 
"branch.%s.merge", branch->name); git_config_set_multivar(buf.buf, NULL, NULL, CONFIG_FLAGS_MULTI_REPLACE); strbuf_release(&buf); } else if (!noncreate_actions && argc > 0 && argc <= 2) { const char *branch_name = argv[0]; const char *start_name = argc == 2 ? argv[1] : head; if (filter.kind != FILTER_REFS_BRANCHES) die(_("the -a, and -r, options to 'git branch' do not take a branch name.\n" "Did you mean to use: -a|-r --list ?")); if (track == BRANCH_TRACK_OVERRIDE) die(_("the '--set-upstream' option is no longer supported. Please use '--track' or '--set-upstream-to' instead")); if (recurse_submodules) { create_branches_recursively(the_repository, branch_name, start_name, NULL, force, reflog, quiet, track, 0); ret = 0; goto out; } create_branch(the_repository, branch_name, start_name, force, 0, reflog, quiet, track, 0); } else usage_with_options(builtin_branch_usage, options); ret = 0; out: string_list_clear(&sorting_options, 0); return ret; } git-cinnabar-0.7.0/git-core/builtin/bugreport.c000064400000000000000000000134341046102023000175270ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "abspath.h" #include "editor.h" #include "gettext.h" #include "parse-options.h" #include "strbuf.h" #include "help.h" #include "compat/compiler.h" #include "hook.h" #include "hook-list.h" #include "diagnose.h" #include "object-file.h" #include "setup.h" static void get_system_info(struct strbuf *sys_info) { struct utsname uname_info; char *shell = NULL; /* get git version from native cmd */ strbuf_addstr(sys_info, _("git version:\n")); get_version_info(sys_info, 1); /* system call for other version info */ strbuf_addstr(sys_info, "uname: "); if (uname(&uname_info)) strbuf_addf(sys_info, _("uname() failed with error '%s' (%d)\n"), strerror(errno), errno); else strbuf_addf(sys_info, "%s %s %s %s\n", uname_info.sysname, uname_info.release, uname_info.version, uname_info.machine); strbuf_addstr(sys_info, _("compiler info: ")); 
get_compiler_info(sys_info); strbuf_addstr(sys_info, _("libc info: ")); get_libc_info(sys_info); shell = getenv("SHELL"); strbuf_addf(sys_info, "$SHELL (typically, interactive shell): %s\n", shell ? shell : ""); } static void get_populated_hooks(struct strbuf *hook_info, int nongit) { const char **p; if (nongit) { strbuf_addstr(hook_info, _("not run from a git repository - no hooks to show\n")); return; } for (p = hook_name_list; *p; p++) { const char *hook = *p; if (hook_exists(the_repository, hook)) strbuf_addf(hook_info, "%s\n", hook); } } static const char * const bugreport_usage[] = { N_("git bugreport [(-o | --output-directory) ]\n" " [(-s | --suffix) | --no-suffix]\n" " [--diagnose[=]]"), NULL }; static int get_bug_template(struct strbuf *template) { const char template_text[] = N_( "Thank you for filling out a Git bug report!\n" "Please answer the following questions to help us understand your issue.\n" "\n" "What did you do before the bug happened? (Steps to reproduce your issue)\n" "\n" "What did you expect to happen? (Expected behavior)\n" "\n" "What happened instead? 
(Actual behavior)\n" "\n" "What's different between what you expected and what actually happened?\n" "\n" "Anything else you want to add:\n" "\n" "Please review the rest of the bug report below.\n" "You can delete any lines you don't wish to share.\n"); strbuf_addstr(template, _(template_text)); return 0; } static void get_header(struct strbuf *buf, const char *title) { strbuf_addf(buf, "\n\n[%s]\n", title); } int cmd_bugreport(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct strbuf buffer = STRBUF_INIT; struct strbuf report_path = STRBUF_INIT; int report = -1; time_t now = time(NULL); struct tm tm; enum diagnose_mode diagnose = DIAGNOSE_NONE; char *option_output = NULL; const char *option_suffix = "%Y-%m-%d-%H%M"; const char *user_relative_path = NULL; char *prefixed_filename; size_t output_path_len; int ret; const struct option bugreport_options[] = { OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"), N_("create an additional zip archive of detailed diagnostics (default 'stats')"), PARSE_OPT_OPTARG, option_parse_diagnose), OPT_STRING('o', "output-directory", &option_output, N_("path"), N_("specify a destination for the bugreport file(s)")), OPT_STRING('s', "suffix", &option_suffix, N_("format"), N_("specify a strftime format suffix for the filename(s)")), OPT_END() }; argc = parse_options(argc, argv, prefix, bugreport_options, bugreport_usage, 0); if (argc) { error(_("unknown argument `%s'"), argv[0]); usage(bugreport_usage[0]); } /* Prepare the path to put the result */ prefixed_filename = prefix_filename(prefix, option_output ? 
option_output : ""); strbuf_addstr(&report_path, prefixed_filename); strbuf_complete(&report_path, '/'); output_path_len = report_path.len; strbuf_addstr(&report_path, "git-bugreport"); if (option_suffix) { strbuf_addch(&report_path, '-'); strbuf_addftime(&report_path, option_suffix, localtime_r(&now, &tm), 0, 0); } strbuf_addstr(&report_path, ".txt"); switch (safe_create_leading_directories(report_path.buf)) { case SCLD_OK: case SCLD_EXISTS: break; default: die(_("could not create leading directories for '%s'"), report_path.buf); } /* Prepare diagnostics, if requested */ if (diagnose != DIAGNOSE_NONE) { struct strbuf zip_path = STRBUF_INIT; strbuf_add(&zip_path, report_path.buf, output_path_len); strbuf_addstr(&zip_path, "git-diagnostics-"); strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0); strbuf_addstr(&zip_path, ".zip"); if (create_diagnostics_archive(&zip_path, diagnose)) die_errno(_("unable to create diagnostics archive %s"), zip_path.buf); strbuf_release(&zip_path); } /* Prepare the report contents */ get_bug_template(&buffer); get_header(&buffer, _("System Info")); get_system_info(&buffer); get_header(&buffer, _("Enabled Hooks")); get_populated_hooks(&buffer, !startup_info->have_repository); /* fopen doesn't offer us an O_EXCL alternative, except with glibc. */ report = xopen(report_path.buf, O_CREAT | O_EXCL | O_WRONLY, 0666); if (write_in_full(report, buffer.buf, buffer.len) < 0) die_errno(_("unable to write to %s"), report_path.buf); close(report); /* * We want to print the path relative to the user, but we still need the * path relative to us to give to the editor. 
*/ if (!(prefix && skip_prefix(report_path.buf, prefix, &user_relative_path))) user_relative_path = report_path.buf; fprintf(stderr, _("Created new report at '%s'.\n"), user_relative_path); free(prefixed_filename); strbuf_release(&buffer); ret = !!launch_editor(report_path.buf, NULL, NULL); strbuf_release(&report_path); return ret; } git-cinnabar-0.7.0/git-core/builtin/bundle.c000064400000000000000000000157341046102023000167740ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "abspath.h" #include "gettext.h" #include "setup.h" #include "strvec.h" #include "parse-options.h" #include "pkt-line.h" #include "bundle.h" /* * Basic handler for bundle files to connect repositories via sneakernet. * Invocation must include action. * This function can create a bundle or provide information on an existing * bundle supporting "fetch", "pull", and "ls-remote". */ #define BUILTIN_BUNDLE_CREATE_USAGE \ N_("git bundle create [-q | --quiet | --progress]\n" \ " [--version=] ") #define BUILTIN_BUNDLE_VERIFY_USAGE \ N_("git bundle verify [-q | --quiet] ") #define BUILTIN_BUNDLE_LIST_HEADS_USAGE \ N_("git bundle list-heads [...]") #define BUILTIN_BUNDLE_UNBUNDLE_USAGE \ N_("git bundle unbundle [--progress] [...]") static char const * const builtin_bundle_usage[] = { BUILTIN_BUNDLE_CREATE_USAGE, BUILTIN_BUNDLE_VERIFY_USAGE, BUILTIN_BUNDLE_LIST_HEADS_USAGE, BUILTIN_BUNDLE_UNBUNDLE_USAGE, NULL, }; static const char * const builtin_bundle_create_usage[] = { BUILTIN_BUNDLE_CREATE_USAGE, NULL }; static const char * const builtin_bundle_verify_usage[] = { BUILTIN_BUNDLE_VERIFY_USAGE, NULL }; static const char * const builtin_bundle_list_heads_usage[] = { BUILTIN_BUNDLE_LIST_HEADS_USAGE, NULL }; static const char * const builtin_bundle_unbundle_usage[] = { BUILTIN_BUNDLE_UNBUNDLE_USAGE, NULL }; static int parse_options_cmd_bundle(int argc, const char **argv, const char* prefix, const char * const usagestr[], const struct option options[], char **bundle_file) 
{ argc = parse_options(argc, argv, NULL, options, usagestr, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) usage_msg_opt(_("need a argument"), usagestr, options); *bundle_file = prefix_filename_except_for_dash(prefix, argv[0]); return argc; } static int cmd_bundle_create(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct strvec pack_opts = STRVEC_INIT; int version = -1; int ret; struct option options[] = { OPT_PASSTHRU_ARGV('q', "quiet", &pack_opts, NULL, N_("do not show progress meter"), PARSE_OPT_NOARG), OPT_PASSTHRU_ARGV(0, "progress", &pack_opts, NULL, N_("show progress meter"), PARSE_OPT_NOARG), OPT_PASSTHRU_ARGV(0, "all-progress", &pack_opts, NULL, N_("historical; same as --progress"), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN), OPT_PASSTHRU_ARGV(0, "all-progress-implied", &pack_opts, NULL, N_("historical; does nothing"), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN), OPT_INTEGER(0, "version", &version, N_("specify bundle format version")), OPT_END() }; char *bundle_file; if (isatty(STDERR_FILENO)) strvec_push(&pack_opts, "--progress"); strvec_push(&pack_opts, "--all-progress-implied"); argc = parse_options_cmd_bundle(argc, argv, prefix, builtin_bundle_create_usage, options, &bundle_file); /* bundle internals use argv[1] as further parameters */ if (!startup_info->have_repository) die(_("Need a repository to create a bundle.")); ret = !!create_bundle(the_repository, bundle_file, argc, argv, &pack_opts, version); strvec_clear(&pack_opts); free(bundle_file); return ret; } /* * Similar to read_bundle_header(), but handle "-" as stdin. 
*/ static int open_bundle(const char *path, struct bundle_header *header, const char **name) { if (!strcmp(path, "-")) { if (name) *name = ""; return read_bundle_header_fd(0, header, ""); } if (name) *name = path; return read_bundle_header(path, header); } static int cmd_bundle_verify(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct bundle_header header = BUNDLE_HEADER_INIT; int bundle_fd = -1; int quiet = 0; int ret; struct option options[] = { OPT_BOOL('q', "quiet", &quiet, N_("do not show bundle details")), OPT_END() }; char *bundle_file; const char *name; argc = parse_options_cmd_bundle(argc, argv, prefix, builtin_bundle_verify_usage, options, &bundle_file); /* bundle internals use argv[1] as further parameters */ if (!startup_info->have_repository) { ret = error(_("need a repository to verify a bundle")); goto cleanup; } if ((bundle_fd = open_bundle(bundle_file, &header, &name)) < 0) { ret = 1; goto cleanup; } close(bundle_fd); if (verify_bundle(the_repository, &header, quiet ? 
VERIFY_BUNDLE_QUIET : VERIFY_BUNDLE_VERBOSE)) { ret = 1; goto cleanup; } fprintf(stderr, _("%s is okay\n"), name); ret = 0; cleanup: free(bundle_file); bundle_header_release(&header); return ret; } static int cmd_bundle_list_heads(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct bundle_header header = BUNDLE_HEADER_INIT; int bundle_fd = -1; int ret; struct option options[] = { OPT_END() }; char *bundle_file; argc = parse_options_cmd_bundle(argc, argv, prefix, builtin_bundle_list_heads_usage, options, &bundle_file); /* bundle internals use argv[1] as further parameters */ if ((bundle_fd = open_bundle(bundle_file, &header, NULL)) < 0) { ret = 1; goto cleanup; } close(bundle_fd); ret = !!list_bundle_refs(&header, argc, argv); cleanup: free(bundle_file); bundle_header_release(&header); return ret; } static int cmd_bundle_unbundle(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct bundle_header header = BUNDLE_HEADER_INIT; int bundle_fd = -1; int ret; int progress = isatty(2); struct option options[] = { OPT_BOOL(0, "progress", &progress, N_("show progress meter")), OPT_END() }; char *bundle_file; struct strvec extra_index_pack_args = STRVEC_INIT; argc = parse_options_cmd_bundle(argc, argv, prefix, builtin_bundle_unbundle_usage, options, &bundle_file); /* bundle internals use argv[1] as further parameters */ if (!startup_info->have_repository) die(_("Need a repository to unbundle.")); if ((bundle_fd = open_bundle(bundle_file, &header, NULL)) < 0) { ret = 1; goto cleanup; } if (progress) strvec_pushl(&extra_index_pack_args, "-v", "--progress-title", _("Unbundling objects"), NULL); ret = !!unbundle(the_repository, &header, bundle_fd, &extra_index_pack_args, NULL) || list_bundle_refs(&header, argc, argv); bundle_header_release(&header); cleanup: strvec_clear(&extra_index_pack_args); free(bundle_file); return ret; } int cmd_bundle(int argc, const char **argv, const char *prefix, struct 
repository *repo) { parse_opt_subcommand_fn *fn = NULL; struct option options[] = { OPT_SUBCOMMAND("create", &fn, cmd_bundle_create), OPT_SUBCOMMAND("verify", &fn, cmd_bundle_verify), OPT_SUBCOMMAND("list-heads", &fn, cmd_bundle_list_heads), OPT_SUBCOMMAND("unbundle", &fn, cmd_bundle_unbundle), OPT_END() }; argc = parse_options(argc, argv, prefix, options, builtin_bundle_usage, 0); packet_trace_identity("bundle"); return !!fn(argc, argv, prefix, repo); } git-cinnabar-0.7.0/git-core/builtin/cat-file.c000064400000000000000000000711301046102023000171770ustar 00000000000000/* * GIT - The information manager from hell * * Copyright (C) Linus Torvalds, 2005 */ #define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "builtin.h" #include "config.h" #include "convert.h" #include "diff.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "ident.h" #include "parse-options.h" #include "userdiff.h" #include "streaming.h" #include "oid-array.h" #include "packfile.h" #include "object-file.h" #include "object-name.h" #include "object-store-ll.h" #include "replace-object.h" #include "promisor-remote.h" #include "mailmap.h" #include "write-or-die.h" enum batch_mode { BATCH_MODE_CONTENTS, BATCH_MODE_INFO, BATCH_MODE_QUEUE_AND_DISPATCH, }; struct batch_options { int enabled; int follow_symlinks; enum batch_mode batch_mode; int buffer_output; int all_objects; int unordered; int transform_mode; /* may be 'w' or 'c' for --filters or --textconv */ char input_delim; char output_delim; const char *format; }; static const char *force_path; static struct string_list mailmap = STRING_LIST_INIT_NODUP; static int use_mailmap; static char *replace_idents_using_mailmap(char *, size_t *); static char *replace_idents_using_mailmap(char *object_buf, size_t *size) { struct strbuf sb = STRBUF_INIT; const char *headers[] = { "author ", "committer ", "tagger ", NULL }; strbuf_attach(&sb, object_buf, *size, *size + 1); apply_mailmap_to_header(&sb, headers, 
&mailmap); *size = sb.len; return strbuf_detach(&sb, NULL); } static int filter_object(const char *path, unsigned mode, const struct object_id *oid, char **buf, unsigned long *size) { enum object_type type; *buf = repo_read_object_file(the_repository, oid, &type, size); if (!*buf) return error(_("cannot read object %s '%s'"), oid_to_hex(oid), path); if ((type == OBJ_BLOB) && S_ISREG(mode)) { struct strbuf strbuf = STRBUF_INIT; struct checkout_metadata meta; init_checkout_metadata(&meta, NULL, NULL, oid); if (convert_to_working_tree(the_repository->index, path, *buf, *size, &strbuf, &meta)) { free(*buf); *size = strbuf.len; *buf = strbuf_detach(&strbuf, NULL); } } return 0; } static int stream_blob(const struct object_id *oid) { if (stream_blob_to_fd(1, oid, NULL, 0)) die("unable to stream %s to stdout", oid_to_hex(oid)); return 0; } static int cat_one_file(int opt, const char *exp_type, const char *obj_name, int unknown_type) { int ret; struct object_id oid; enum object_type type; char *buf; unsigned long size; struct object_context obj_context = {0}; struct object_info oi = OBJECT_INFO_INIT; struct strbuf sb = STRBUF_INIT; unsigned flags = OBJECT_INFO_LOOKUP_REPLACE; unsigned get_oid_flags = GET_OID_RECORD_PATH | GET_OID_ONLY_TO_DIE | GET_OID_HASH_ANY; const char *path = force_path; const int opt_cw = (opt == 'c' || opt == 'w'); if (!path && opt_cw) get_oid_flags |= GET_OID_REQUIRE_PATH; if (unknown_type) flags |= OBJECT_INFO_ALLOW_UNKNOWN_TYPE; if (get_oid_with_context(the_repository, obj_name, get_oid_flags, &oid, &obj_context)) die("Not a valid object name %s", obj_name); if (!path) path = obj_context.path; if (obj_context.mode == S_IFINVALID) obj_context.mode = 0100644; buf = NULL; switch (opt) { case 't': oi.type_name = &sb; if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0) die("git cat-file: could not get object info"); if (sb.len) { printf("%s\n", sb.buf); strbuf_release(&sb); ret = 0; goto cleanup; } break; case 's': oi.sizep = &size; if 
(use_mailmap) { oi.typep = &type; oi.contentp = (void**)&buf; } if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0) die("git cat-file: could not get object info"); if (use_mailmap && (type == OBJ_COMMIT || type == OBJ_TAG)) { size_t s = size; buf = replace_idents_using_mailmap(buf, &s); size = cast_size_t_to_ulong(s); } printf("%"PRIuMAX"\n", (uintmax_t)size); ret = 0; goto cleanup; case 'e': ret = !repo_has_object_file(the_repository, &oid); goto cleanup; case 'w': if (filter_object(path, obj_context.mode, &oid, &buf, &size)) { ret = -1; goto cleanup; } break; case 'c': if (textconv_object(the_repository, path, obj_context.mode, &oid, 1, &buf, &size)) break; /* else fallthrough */ case 'p': type = oid_object_info(the_repository, &oid, NULL); if (type < 0) die("Not a valid object name %s", obj_name); /* custom pretty-print here */ if (type == OBJ_TREE) { const char *ls_args[3] = { NULL }; ls_args[0] = "ls-tree"; ls_args[1] = obj_name; ret = cmd_ls_tree(2, ls_args, NULL, the_repository); goto cleanup; } if (type == OBJ_BLOB) { ret = stream_blob(&oid); goto cleanup; } buf = repo_read_object_file(the_repository, &oid, &type, &size); if (!buf) die("Cannot read object %s", obj_name); if (use_mailmap) { size_t s = size; buf = replace_idents_using_mailmap(buf, &s); size = cast_size_t_to_ulong(s); } /* otherwise just spit out the data */ break; case 0: { enum object_type exp_type_id = type_from_string(exp_type); if (exp_type_id == OBJ_BLOB) { struct object_id blob_oid; if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) { char *buffer = repo_read_object_file(the_repository, &oid, &type, &size); const char *target; if (!buffer) die(_("unable to read %s"), oid_to_hex(&oid)); if (!skip_prefix(buffer, "object ", &target) || get_oid_hex_algop(target, &blob_oid, &hash_algos[oid.algo])) die("%s not a valid tag", oid_to_hex(&oid)); free(buffer); } else oidcpy(&blob_oid, &oid); if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) { ret = 
stream_blob(&blob_oid); goto cleanup; } /* * we attempted to dereference a tag to a blob * and failed; there may be new dereference * mechanisms this code is not aware of. * fall-back to the usual case. */ } buf = read_object_with_reference(the_repository, &oid, exp_type_id, &size, NULL); if (use_mailmap) { size_t s = size; buf = replace_idents_using_mailmap(buf, &s); size = cast_size_t_to_ulong(s); } break; } default: die("git cat-file: unknown option: %s", exp_type); } if (!buf) die("git cat-file %s: bad file", obj_name); write_or_die(1, buf, size); ret = 0; cleanup: free(buf); object_context_release(&obj_context); return ret; } struct expand_data { struct object_id oid; enum object_type type; unsigned long size; off_t disk_size; const char *rest; struct object_id delta_base_oid; /* * If mark_query is true, we do not expand anything, but rather * just mark the object_info with items we wish to query. */ int mark_query; /* * Whether to split the input on whitespace before feeding it to * get_sha1; this is decided during the mark_query phase based on * whether we have a %(rest) token in our format. */ int split_on_whitespace; /* * After a mark_query run, this object_info is set up to be * passed to oid_object_info_extended. It will point to the data * elements above, so you can retrieve the response from there. */ struct object_info info; /* * This flag will be true if the requested batch format and options * don't require us to call oid_object_info, which can then be * optimized out. 
*/ unsigned skip_object_info : 1; }; static int is_atom(const char *atom, const char *s, int slen) { int alen = strlen(atom); return alen == slen && !memcmp(atom, s, alen); } static int expand_atom(struct strbuf *sb, const char *atom, int len, struct expand_data *data) { if (is_atom("objectname", atom, len)) { if (!data->mark_query) strbuf_addstr(sb, oid_to_hex(&data->oid)); } else if (is_atom("objecttype", atom, len)) { if (data->mark_query) data->info.typep = &data->type; else strbuf_addstr(sb, type_name(data->type)); } else if (is_atom("objectsize", atom, len)) { if (data->mark_query) data->info.sizep = &data->size; else strbuf_addf(sb, "%"PRIuMAX , (uintmax_t)data->size); } else if (is_atom("objectsize:disk", atom, len)) { if (data->mark_query) data->info.disk_sizep = &data->disk_size; else strbuf_addf(sb, "%"PRIuMAX, (uintmax_t)data->disk_size); } else if (is_atom("rest", atom, len)) { if (data->mark_query) data->split_on_whitespace = 1; else if (data->rest) strbuf_addstr(sb, data->rest); } else if (is_atom("deltabase", atom, len)) { if (data->mark_query) data->info.delta_base_oid = &data->delta_base_oid; else strbuf_addstr(sb, oid_to_hex(&data->delta_base_oid)); } else return 0; return 1; } static void expand_format(struct strbuf *sb, const char *start, struct expand_data *data) { while (strbuf_expand_step(sb, &start)) { const char *end; if (skip_prefix(start, "%", &start) || *start != '(') strbuf_addch(sb, '%'); else if ((end = strchr(start + 1, ')')) && expand_atom(sb, start + 1, end - start - 1, data)) start = end + 1; else strbuf_expand_bad_format(start, "cat-file"); } } static void batch_write(struct batch_options *opt, const void *data, int len) { if (opt->buffer_output) { if (fwrite(data, 1, len, stdout) != len) die_errno("unable to write to stdout"); } else write_or_die(1, data, len); } static void print_object_or_die(struct batch_options *opt, struct expand_data *data) { const struct object_id *oid = &data->oid; assert(data->info.typep); if 
(data->type == OBJ_BLOB) { if (opt->buffer_output) fflush(stdout); if (opt->transform_mode) { char *contents; unsigned long size; if (!data->rest) die("missing path for '%s'", oid_to_hex(oid)); if (opt->transform_mode == 'w') { if (filter_object(data->rest, 0100644, oid, &contents, &size)) die("could not convert '%s' %s", oid_to_hex(oid), data->rest); } else if (opt->transform_mode == 'c') { enum object_type type; if (!textconv_object(the_repository, data->rest, 0100644, oid, 1, &contents, &size)) contents = repo_read_object_file(the_repository, oid, &type, &size); if (!contents) die("could not convert '%s' %s", oid_to_hex(oid), data->rest); } else BUG("invalid transform_mode: %c", opt->transform_mode); batch_write(opt, contents, size); free(contents); } else { stream_blob(oid); } } else { enum object_type type; unsigned long size; void *contents; contents = repo_read_object_file(the_repository, oid, &type, &size); if (!contents) die("object %s disappeared", oid_to_hex(oid)); if (use_mailmap) { size_t s = size; contents = replace_idents_using_mailmap(contents, &s); size = cast_size_t_to_ulong(s); } if (type != data->type) die("object %s changed type!?", oid_to_hex(oid)); if (data->info.sizep && size != data->size && !use_mailmap) die("object %s changed size!?", oid_to_hex(oid)); batch_write(opt, contents, size); free(contents); } } static void print_default_format(struct strbuf *scratch, struct expand_data *data, struct batch_options *opt) { strbuf_addf(scratch, "%s %s %"PRIuMAX"%c", oid_to_hex(&data->oid), type_name(data->type), (uintmax_t)data->size, opt->output_delim); } /* * If "pack" is non-NULL, then "offset" is the byte offset within the pack from * which the object may be accessed (though note that we may also rely on * data->oid, too). If "pack" is NULL, then offset is ignored. 
*/ static void batch_object_write(const char *obj_name, struct strbuf *scratch, struct batch_options *opt, struct expand_data *data, struct packed_git *pack, off_t offset) { if (!data->skip_object_info) { int ret; if (use_mailmap) data->info.typep = &data->type; if (pack) ret = packed_object_info(the_repository, pack, offset, &data->info); else ret = oid_object_info_extended(the_repository, &data->oid, &data->info, OBJECT_INFO_LOOKUP_REPLACE); if (ret < 0) { printf("%s missing%c", obj_name ? obj_name : oid_to_hex(&data->oid), opt->output_delim); fflush(stdout); return; } if (use_mailmap && (data->type == OBJ_COMMIT || data->type == OBJ_TAG)) { size_t s = data->size; char *buf = NULL; buf = repo_read_object_file(the_repository, &data->oid, &data->type, &data->size); if (!buf) die(_("unable to read %s"), oid_to_hex(&data->oid)); buf = replace_idents_using_mailmap(buf, &s); data->size = cast_size_t_to_ulong(s); free(buf); } } strbuf_reset(scratch); if (!opt->format) { print_default_format(scratch, data, opt); } else { expand_format(scratch, opt->format, data); strbuf_addch(scratch, opt->output_delim); } batch_write(opt, scratch->buf, scratch->len); if (opt->batch_mode == BATCH_MODE_CONTENTS) { print_object_or_die(opt, data); batch_write(opt, &opt->output_delim, 1); } } static void batch_one_object(const char *obj_name, struct strbuf *scratch, struct batch_options *opt, struct expand_data *data) { struct object_context ctx = {0}; int flags = GET_OID_HASH_ANY | (opt->follow_symlinks ? 
GET_OID_FOLLOW_SYMLINKS : 0); enum get_oid_result result; result = get_oid_with_context(the_repository, obj_name, flags, &data->oid, &ctx); if (result != FOUND) { switch (result) { case MISSING_OBJECT: printf("%s missing%c", obj_name, opt->output_delim); break; case SHORT_NAME_AMBIGUOUS: printf("%s ambiguous%c", obj_name, opt->output_delim); break; case DANGLING_SYMLINK: printf("dangling %"PRIuMAX"%c%s%c", (uintmax_t)strlen(obj_name), opt->output_delim, obj_name, opt->output_delim); break; case SYMLINK_LOOP: printf("loop %"PRIuMAX"%c%s%c", (uintmax_t)strlen(obj_name), opt->output_delim, obj_name, opt->output_delim); break; case NOT_DIR: printf("notdir %"PRIuMAX"%c%s%c", (uintmax_t)strlen(obj_name), opt->output_delim, obj_name, opt->output_delim); break; default: BUG("unknown get_sha1_with_context result %d\n", result); break; } fflush(stdout); goto out; } if (ctx.mode == 0) { printf("symlink %"PRIuMAX"%c%s%c", (uintmax_t)ctx.symlink_path.len, opt->output_delim, ctx.symlink_path.buf, opt->output_delim); fflush(stdout); goto out; } batch_object_write(obj_name, scratch, opt, data, NULL, 0); out: object_context_release(&ctx); } struct object_cb_data { struct batch_options *opt; struct expand_data *expand; struct oidset *seen; struct strbuf *scratch; }; static int batch_object_cb(const struct object_id *oid, void *vdata) { struct object_cb_data *data = vdata; oidcpy(&data->expand->oid, oid); batch_object_write(NULL, data->scratch, data->opt, data->expand, NULL, 0); return 0; } static int collect_loose_object(const struct object_id *oid, const char *path UNUSED, void *data) { oid_array_append(data, oid); return 0; } static int collect_packed_object(const struct object_id *oid, struct packed_git *pack UNUSED, uint32_t pos UNUSED, void *data) { oid_array_append(data, oid); return 0; } static int batch_unordered_object(const struct object_id *oid, struct packed_git *pack, off_t offset, void *vdata) { struct object_cb_data *data = vdata; if (oidset_insert(data->seen, oid)) 
return 0; oidcpy(&data->expand->oid, oid); batch_object_write(NULL, data->scratch, data->opt, data->expand, pack, offset); return 0; } static int batch_unordered_loose(const struct object_id *oid, const char *path UNUSED, void *data) { return batch_unordered_object(oid, NULL, 0, data); } static int batch_unordered_packed(const struct object_id *oid, struct packed_git *pack, uint32_t pos, void *data) { return batch_unordered_object(oid, pack, nth_packed_object_offset(pack, pos), data); } typedef void (*parse_cmd_fn_t)(struct batch_options *, const char *, struct strbuf *, struct expand_data *); struct queued_cmd { parse_cmd_fn_t fn; char *line; }; static void parse_cmd_contents(struct batch_options *opt, const char *line, struct strbuf *output, struct expand_data *data) { opt->batch_mode = BATCH_MODE_CONTENTS; batch_one_object(line, output, opt, data); } static void parse_cmd_info(struct batch_options *opt, const char *line, struct strbuf *output, struct expand_data *data) { opt->batch_mode = BATCH_MODE_INFO; batch_one_object(line, output, opt, data); } static void dispatch_calls(struct batch_options *opt, struct strbuf *output, struct expand_data *data, struct queued_cmd *cmd, int nr) { int i; if (!opt->buffer_output) die(_("flush is only for --buffer mode")); for (i = 0; i < nr; i++) cmd[i].fn(opt, cmd[i].line, output, data); fflush(stdout); } static void free_cmds(struct queued_cmd *cmd, size_t *nr) { size_t i; for (i = 0; i < *nr; i++) FREE_AND_NULL(cmd[i].line); *nr = 0; } static const struct parse_cmd { const char *name; parse_cmd_fn_t fn; unsigned takes_args; } commands[] = { { "contents", parse_cmd_contents, 1}, { "info", parse_cmd_info, 1}, { "flush", NULL, 0}, }; static void batch_objects_command(struct batch_options *opt, struct strbuf *output, struct expand_data *data) { struct strbuf input = STRBUF_INIT; struct queued_cmd *queued_cmd = NULL; size_t alloc = 0, nr = 0; while (strbuf_getdelim_strip_crlf(&input, stdin, opt->input_delim) != EOF) { int i; 
const struct parse_cmd *cmd = NULL; const char *p = NULL, *cmd_end; struct queued_cmd call = {0}; if (!input.len) die(_("empty command in input")); if (isspace(*input.buf)) die(_("whitespace before command: '%s'"), input.buf); for (i = 0; i < ARRAY_SIZE(commands); i++) { if (!skip_prefix(input.buf, commands[i].name, &cmd_end)) continue; cmd = &commands[i]; if (cmd->takes_args) { if (*cmd_end != ' ') die(_("%s requires arguments"), commands[i].name); p = cmd_end + 1; } else if (*cmd_end) { die(_("%s takes no arguments"), commands[i].name); } break; } if (!cmd) die(_("unknown command: '%s'"), input.buf); if (!strcmp(cmd->name, "flush")) { dispatch_calls(opt, output, data, queued_cmd, nr); free_cmds(queued_cmd, &nr); } else if (!opt->buffer_output) { cmd->fn(opt, p, output, data); } else { ALLOC_GROW(queued_cmd, nr + 1, alloc); call.fn = cmd->fn; call.line = xstrdup_or_null(p); queued_cmd[nr++] = call; } } if (opt->buffer_output && nr && !git_env_bool("GIT_TEST_CAT_FILE_NO_FLUSH_ON_EXIT", 0)) { dispatch_calls(opt, output, data, queued_cmd, nr); free_cmds(queued_cmd, &nr); } free_cmds(queued_cmd, &nr); free(queued_cmd); strbuf_release(&input); } #define DEFAULT_FORMAT "%(objectname) %(objecttype) %(objectsize)" static int batch_objects(struct batch_options *opt) { struct strbuf input = STRBUF_INIT; struct strbuf output = STRBUF_INIT; struct expand_data data; int save_warning; int retval = 0; /* * Expand once with our special mark_query flag, which will prime the * object_info to be handed to oid_object_info_extended for each * object. */ memset(&data, 0, sizeof(data)); data.mark_query = 1; expand_format(&output, opt->format ? opt->format : DEFAULT_FORMAT, &data); data.mark_query = 0; strbuf_release(&output); if (opt->transform_mode) data.split_on_whitespace = 1; if (opt->format && !strcmp(opt->format, DEFAULT_FORMAT)) opt->format = NULL; /* * If we are printing out the object, then always fill in the type, * since we will want to decide whether or not to stream. 
*/ if (opt->batch_mode == BATCH_MODE_CONTENTS) data.info.typep = &data.type; if (opt->all_objects) { struct object_cb_data cb; struct object_info empty = OBJECT_INFO_INIT; if (!memcmp(&data.info, &empty, sizeof(empty))) data.skip_object_info = 1; if (repo_has_promisor_remote(the_repository)) warning("This repository uses promisor remotes. Some objects may not be loaded."); disable_replace_refs(); cb.opt = opt; cb.expand = &data; cb.scratch = &output; if (opt->unordered) { struct oidset seen = OIDSET_INIT; cb.seen = &seen; for_each_loose_object(batch_unordered_loose, &cb, 0); for_each_packed_object(the_repository, batch_unordered_packed, &cb, FOR_EACH_OBJECT_PACK_ORDER); oidset_clear(&seen); } else { struct oid_array sa = OID_ARRAY_INIT; for_each_loose_object(collect_loose_object, &sa, 0); for_each_packed_object(the_repository, collect_packed_object, &sa, 0); oid_array_for_each_unique(&sa, batch_object_cb, &cb); oid_array_clear(&sa); } strbuf_release(&output); return 0; } /* * We are going to call get_sha1 on a potentially very large number of * objects. In most large cases, these will be actual object sha1s. The * cost to double-check that each one is not also a ref (just so we can * warn) ends up dwarfing the actual cost of the object lookups * themselves. We can work around it by just turning off the warning. */ save_warning = warn_on_object_refname_ambiguity; warn_on_object_refname_ambiguity = 0; if (opt->batch_mode == BATCH_MODE_QUEUE_AND_DISPATCH) { batch_objects_command(opt, &output, &data); goto cleanup; } while (strbuf_getdelim_strip_crlf(&input, stdin, opt->input_delim) != EOF) { if (data.split_on_whitespace) { /* * Split at first whitespace, tying off the beginning * of the string and saving the remainder (or NULL) in * data.rest. 
*/ char *p = strpbrk(input.buf, " \t"); if (p) { while (*p && strchr(" \t", *p)) *p++ = '\0'; } data.rest = p; } batch_one_object(input.buf, &output, opt, &data); } cleanup: strbuf_release(&input); strbuf_release(&output); warn_on_object_refname_ambiguity = save_warning; return retval; } static int git_cat_file_config(const char *var, const char *value, const struct config_context *ctx, void *cb) { if (userdiff_config(var, value) < 0) return -1; return git_default_config(var, value, ctx, cb); } static int batch_option_callback(const struct option *opt, const char *arg, int unset) { struct batch_options *bo = opt->value; BUG_ON_OPT_NEG(unset); if (bo->enabled) { return error(_("only one batch option may be specified")); } bo->enabled = 1; if (!strcmp(opt->long_name, "batch")) bo->batch_mode = BATCH_MODE_CONTENTS; else if (!strcmp(opt->long_name, "batch-check")) bo->batch_mode = BATCH_MODE_INFO; else if (!strcmp(opt->long_name, "batch-command")) bo->batch_mode = BATCH_MODE_QUEUE_AND_DISPATCH; else BUG("%s given to batch-option-callback", opt->long_name); bo->format = arg; return 0; } int cmd_cat_file(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { int opt = 0; int opt_cw = 0; int opt_epts = 0; const char *exp_type = NULL, *obj_name = NULL; struct batch_options batch = {0}; int unknown_type = 0; int input_nul_terminated = 0; int nul_terminated = 0; const char * const usage[] = { N_("git cat-file "), N_("git cat-file (-e | -p) "), N_("git cat-file (-t | -s) [--allow-unknown-type] "), N_("git cat-file (--textconv | --filters)\n" " [: | --path= ]"), N_("git cat-file (--batch | --batch-check | --batch-command) [--batch-all-objects]\n" " [--buffer] [--follow-symlinks] [--unordered]\n" " [--textconv | --filters] [-Z]"), NULL }; const struct option options[] = { /* Simple queries */ OPT_GROUP(N_("Check object existence or emit object contents")), OPT_CMDMODE('e', NULL, &opt, N_("check if exists"), 'e'), OPT_CMDMODE('p', NULL, &opt, 
N_("pretty-print content"), 'p'), OPT_GROUP(N_("Emit [broken] object attributes")), OPT_CMDMODE('t', NULL, &opt, N_("show object type (one of 'blob', 'tree', 'commit', 'tag', ...)"), 't'), OPT_CMDMODE('s', NULL, &opt, N_("show object size"), 's'), OPT_BOOL(0, "allow-unknown-type", &unknown_type, N_("allow -s and -t to work with broken/corrupt objects")), OPT_BOOL(0, "use-mailmap", &use_mailmap, N_("use mail map file")), OPT_ALIAS(0, "mailmap", "use-mailmap"), /* Batch mode */ OPT_GROUP(N_("Batch objects requested on stdin (or --batch-all-objects)")), OPT_CALLBACK_F(0, "batch", &batch, N_("format"), N_("show full or contents"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, batch_option_callback), OPT_CALLBACK_F(0, "batch-check", &batch, N_("format"), N_("like --batch, but don't emit "), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, batch_option_callback), OPT_BOOL_F('z', NULL, &input_nul_terminated, N_("stdin is NUL-terminated"), PARSE_OPT_HIDDEN), OPT_BOOL('Z', NULL, &nul_terminated, N_("stdin and stdout is NUL-terminated")), OPT_CALLBACK_F(0, "batch-command", &batch, N_("format"), N_("read commands from stdin"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, batch_option_callback), OPT_CMDMODE(0, "batch-all-objects", &opt, N_("with --batch[-check]: ignores stdin, batches all known objects"), 'b'), /* Batch-specific options */ OPT_GROUP(N_("Change or optimize batch output")), OPT_BOOL(0, "buffer", &batch.buffer_output, N_("buffer --batch output")), OPT_BOOL(0, "follow-symlinks", &batch.follow_symlinks, N_("follow in-tree symlinks")), OPT_BOOL(0, "unordered", &batch.unordered, N_("do not order objects before emitting them")), /* Textconv options, stand-ole*/ OPT_GROUP(N_("Emit object (blob or tree) with conversion or filter (stand-alone, or with batch)")), OPT_CMDMODE(0, "textconv", &opt, N_("run textconv on object's content"), 'c'), OPT_CMDMODE(0, "filters", &opt, N_("run filters on object's content"), 'w'), OPT_STRING(0, "path", &force_path, N_("blob|tree"), N_("use a for (--textconv | 
--filters); Not with 'batch'")), OPT_END() }; git_config(git_cat_file_config, NULL); batch.buffer_output = -1; argc = parse_options(argc, argv, prefix, options, usage, 0); opt_cw = (opt == 'c' || opt == 'w'); opt_epts = (opt == 'e' || opt == 'p' || opt == 't' || opt == 's'); if (use_mailmap) read_mailmap(&mailmap); /* --batch-all-objects? */ if (opt == 'b') batch.all_objects = 1; /* Option compatibility */ if (force_path && !opt_cw) usage_msg_optf(_("'%s=<%s>' needs '%s' or '%s'"), usage, options, "--path", _("path|tree-ish"), "--filters", "--textconv"); /* Option compatibility with batch mode */ if (batch.enabled) ; else if (batch.follow_symlinks) usage_msg_optf(_("'%s' requires a batch mode"), usage, options, "--follow-symlinks"); else if (batch.buffer_output >= 0) usage_msg_optf(_("'%s' requires a batch mode"), usage, options, "--buffer"); else if (batch.all_objects) usage_msg_optf(_("'%s' requires a batch mode"), usage, options, "--batch-all-objects"); else if (input_nul_terminated) usage_msg_optf(_("'%s' requires a batch mode"), usage, options, "-z"); else if (nul_terminated) usage_msg_optf(_("'%s' requires a batch mode"), usage, options, "-Z"); batch.input_delim = batch.output_delim = '\n'; if (input_nul_terminated) batch.input_delim = '\0'; if (nul_terminated) batch.input_delim = batch.output_delim = '\0'; /* Batch defaults */ if (batch.buffer_output < 0) batch.buffer_output = batch.all_objects; prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; /* Return early if we're in batch mode? 
*/ if (batch.enabled) { if (opt_cw) batch.transform_mode = opt; else if (opt && opt != 'b') usage_msg_optf(_("'-%c' is incompatible with batch mode"), usage, options, opt); else if (argc) usage_msg_opt(_("batch modes take no arguments"), usage, options); return batch_objects(&batch); } if (opt) { if (!argc && opt == 'c') usage_msg_optf(_(" required with '%s'"), usage, options, "--textconv"); else if (!argc && opt == 'w') usage_msg_optf(_(" required with '%s'"), usage, options, "--filters"); else if (!argc && opt_epts) usage_msg_optf(_(" required with '-%c'"), usage, options, opt); else if (argc == 1) obj_name = argv[0]; else usage_msg_opt(_("too many arguments"), usage, options); } else if (!argc) { usage_with_options(usage, options); } else if (argc != 2) { usage_msg_optf(_("only two arguments allowed in mode, not %d"), usage, options, argc); } else if (argc) { exp_type = argv[0]; obj_name = argv[1]; } if (unknown_type && opt != 't' && opt != 's') die("git cat-file --allow-unknown-type: use with -s or -t"); return cat_one_file(opt, exp_type, obj_name, unknown_type); } git-cinnabar-0.7.0/git-core/builtin/check-attr.c000064400000000000000000000122051046102023000175360ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "config.h" #include "attr.h" #include "environment.h" #include "gettext.h" #include "object-name.h" #include "quote.h" #include "setup.h" #include "parse-options.h" #include "write-or-die.h" static int all_attrs; static int cached_attrs; static int stdin_paths; static char *source; static const char * const check_attr_usage[] = { N_("git check-attr [--source ] [-a | --all | ...] 
[--] ..."), N_("git check-attr --stdin [-z] [--source ] [-a | --all | ...]"), NULL }; static int nul_term_line; static const struct option check_attr_options[] = { OPT_BOOL('a', "all", &all_attrs, N_("report all attributes set on file")), OPT_BOOL(0, "cached", &cached_attrs, N_("use .gitattributes only from the index")), OPT_BOOL(0 , "stdin", &stdin_paths, N_("read file names from stdin")), OPT_BOOL('z', NULL, &nul_term_line, N_("terminate input and output records by a NUL character")), OPT_STRING(0, "source", &source, N_(""), N_("which tree-ish to check attributes at")), OPT_END() }; static void output_attr(struct attr_check *check, const char *file) { int j; int cnt = check->nr; for (j = 0; j < cnt; j++) { const char *value = check->items[j].value; if (ATTR_TRUE(value)) value = "set"; else if (ATTR_FALSE(value)) value = "unset"; else if (ATTR_UNSET(value)) value = "unspecified"; if (nul_term_line) { printf("%s%c" /* path */ "%s%c" /* attrname */ "%s%c" /* attrvalue */, file, 0, git_attr_name(check->items[j].attr), 0, value, 0); } else { quote_c_style(file, NULL, stdout, 0); printf(": %s: %s\n", git_attr_name(check->items[j].attr), value); } } } static void check_attr(const char *prefix, struct attr_check *check, int collect_all, const char *file) { char *full_path = prefix_path(prefix, prefix ? strlen(prefix) : 0, file); if (collect_all) { git_all_attrs(the_repository->index, full_path, check); } else { git_check_attr(the_repository->index, full_path, check); } output_attr(check, file); free(full_path); } static void check_attr_stdin_paths(const char *prefix, struct attr_check *check, int collect_all) { struct strbuf buf = STRBUF_INIT; struct strbuf unquoted = STRBUF_INIT; strbuf_getline_fn getline_fn; getline_fn = nul_term_line ? 
strbuf_getline_nul : strbuf_getline_lf; while (getline_fn(&buf, stdin) != EOF) { if (!nul_term_line && buf.buf[0] == '"') { strbuf_reset(&unquoted); if (unquote_c_style(&unquoted, buf.buf, NULL)) die("line is badly quoted"); strbuf_swap(&buf, &unquoted); } check_attr(prefix, check, collect_all, buf.buf); maybe_flush_or_die(stdout, "attribute to stdout"); } strbuf_release(&buf); strbuf_release(&unquoted); } static NORETURN void error_with_usage(const char *msg) { error("%s", msg); usage_with_options(check_attr_usage, check_attr_options); } int cmd_check_attr(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct attr_check *check; struct object_id initialized_oid; int cnt, i, doubledash, filei; if (!is_bare_repository()) setup_work_tree(); git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, check_attr_options, check_attr_usage, PARSE_OPT_KEEP_DASHDASH); prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; if (repo_read_index(the_repository) < 0) { die("invalid cache"); } if (cached_attrs) git_attr_set_direction(GIT_ATTR_INDEX); doubledash = -1; for (i = 0; doubledash < 0 && i < argc; i++) { if (!strcmp(argv[i], "--")) doubledash = i; } /* Process --all and/or attribute arguments: */ if (all_attrs) { if (doubledash >= 1) error_with_usage("Attributes and --all both specified"); cnt = 0; filei = doubledash + 1; } else if (doubledash == 0) { error_with_usage("No attribute specified"); } else if (doubledash < 0) { if (!argc) error_with_usage("No attribute specified"); if (stdin_paths) { /* Treat all arguments as attribute names. */ cnt = argc; filei = argc; } else { /* Treat exactly one argument as an attribute name. 
*/ cnt = 1; filei = 1; } } else { cnt = doubledash; filei = doubledash + 1; } /* Check file argument(s): */ if (stdin_paths) { if (filei < argc) error_with_usage("Can't specify files with --stdin"); } else { if (filei >= argc) error_with_usage("No file specified"); } check = attr_check_alloc(); if (!all_attrs) { for (i = 0; i < cnt; i++) { const struct git_attr *a = git_attr(argv[i]); if (!a) return error("%s: not a valid attribute name", argv[i]); attr_check_append(check, a); } } if (source) { if (repo_get_oid_tree(the_repository, source, &initialized_oid)) die("%s: not a valid tree-ish source", source); set_git_attr_source(source); } if (stdin_paths) check_attr_stdin_paths(prefix, check, all_attrs); else { for (i = filei; i < argc; i++) check_attr(prefix, check, all_attrs, argv[i]); maybe_flush_or_die(stdout, "attribute to stdout"); } attr_check_free(check); return 0; } git-cinnabar-0.7.0/git-core/builtin/check-ignore.c000064400000000000000000000124551046102023000200560ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "config.h" #include "dir.h" #include "gettext.h" #include "quote.h" #include "pathspec.h" #include "parse-options.h" #include "submodule.h" #include "write-or-die.h" static int quiet, verbose, stdin_paths, show_non_matching, no_index; static const char * const check_ignore_usage[] = { "git check-ignore [] ...", "git check-ignore [] --stdin", NULL }; static int nul_term_line; static const struct option check_ignore_options[] = { OPT__QUIET(&quiet, N_("suppress progress reporting")), OPT__VERBOSE(&verbose, N_("be verbose")), OPT_GROUP(""), OPT_BOOL(0, "stdin", &stdin_paths, N_("read file names from stdin")), OPT_BOOL('z', NULL, &nul_term_line, N_("terminate input and output records by a NUL character")), OPT_BOOL('n', "non-matching", &show_non_matching, N_("show non-matching input paths")), OPT_BOOL(0, "no-index", &no_index, N_("ignore index when checking")), OPT_END() }; static void output_pattern(const char *path, 
struct path_pattern *pattern) { const char *bang = (pattern && pattern->flags & PATTERN_FLAG_NEGATIVE) ? "!" : ""; const char *slash = (pattern && pattern->flags & PATTERN_FLAG_MUSTBEDIR) ? "/" : ""; if (!nul_term_line) { if (!verbose) { write_name_quoted(path, stdout, '\n'); } else { if (pattern) { quote_c_style(pattern->pl->src, NULL, stdout, 0); printf(":%d:%s%s%s\t", pattern->srcpos, bang, pattern->pattern, slash); } else { printf("::\t"); } quote_c_style(path, NULL, stdout, 0); fputc('\n', stdout); } } else { if (!verbose) { printf("%s%c", path, '\0'); } else { if (pattern) printf("%s%c%d%c%s%s%s%c%s%c", pattern->pl->src, '\0', pattern->srcpos, '\0', bang, pattern->pattern, slash, '\0', path, '\0'); else printf("%c%c%c%s%c", '\0', '\0', '\0', path, '\0'); } } } static int check_ignore(struct dir_struct *dir, const char *prefix, int argc, const char **argv) { const char *full_path; char *seen; int num_ignored = 0, i; struct path_pattern *pattern; struct pathspec pathspec; if (!argc) { if (!quiet) fprintf(stderr, "no pathspec given.\n"); return 0; } /* * check-ignore just needs paths. Magic beyond :/ is really * irrelevant. */ parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC & ~PATHSPEC_FROMTOP, PATHSPEC_SYMLINK_LEADING_PATH | PATHSPEC_KEEP_ORDER, prefix, argv); die_path_inside_submodule(the_repository->index, &pathspec); /* * look for pathspecs matching entries in the index, since these * should not be ignored, in order to be consistent with * 'git status', 'git add' etc. 
*/ seen = find_pathspecs_matching_against_index(&pathspec, the_repository->index, PS_HEED_SKIP_WORKTREE); for (i = 0; i < pathspec.nr; i++) { full_path = pathspec.items[i].match; pattern = NULL; if (!seen[i]) { int dtype = DT_UNKNOWN; pattern = last_matching_pattern(dir, the_repository->index, full_path, &dtype); if (!verbose && pattern && pattern->flags & PATTERN_FLAG_NEGATIVE) pattern = NULL; } if (!quiet && (pattern || show_non_matching)) output_pattern(pathspec.items[i].original, pattern); if (pattern) num_ignored++; } free(seen); clear_pathspec(&pathspec); return num_ignored; } static int check_ignore_stdin_paths(struct dir_struct *dir, const char *prefix) { struct strbuf buf = STRBUF_INIT; struct strbuf unquoted = STRBUF_INIT; char *pathspec[2] = { NULL, NULL }; strbuf_getline_fn getline_fn; int num_ignored = 0; getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf; while (getline_fn(&buf, stdin) != EOF) { if (!nul_term_line && buf.buf[0] == '"') { strbuf_reset(&unquoted); if (unquote_c_style(&unquoted, buf.buf, NULL)) die("line is badly quoted"); strbuf_swap(&buf, &unquoted); } pathspec[0] = buf.buf; num_ignored += check_ignore(dir, prefix, 1, (const char **)pathspec); maybe_flush_or_die(stdout, "check-ignore to stdout"); } strbuf_release(&buf); strbuf_release(&unquoted); return num_ignored; } int cmd_check_ignore(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { int num_ignored; struct dir_struct dir = DIR_INIT; git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, check_ignore_options, check_ignore_usage, 0); if (stdin_paths) { if (argc > 0) die(_("cannot specify pathnames with --stdin")); } else { if (nul_term_line) die(_("-z only makes sense with --stdin")); if (argc == 0) die(_("no path specified")); } if (quiet) { if (argc > 1) die(_("--quiet is only valid with a single pathname")); if (verbose) die(_("cannot have both --quiet and --verbose")); } if (show_non_matching && !verbose) 
die(_("--non-matching is only valid with --verbose")); /* read_cache() is only necessary so we can watch out for submodules. */ if (!no_index && repo_read_index(the_repository) < 0) die(_("index file corrupt")); setup_standard_excludes(&dir); if (stdin_paths) { num_ignored = check_ignore_stdin_paths(&dir, prefix); } else { num_ignored = check_ignore(&dir, prefix, argc, argv); maybe_flush_or_die(stdout, "ignore to stdout"); } dir_clear(&dir); return !num_ignored; } git-cinnabar-0.7.0/git-core/builtin/check-mailmap.c000064400000000000000000000043531046102023000202110ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #include "builtin.h" #include "config.h" #include "gettext.h" #include "ident.h" #include "mailmap.h" #include "parse-options.h" #include "strbuf.h" #include "string-list.h" #include "write-or-die.h" static int use_stdin; static const char *mailmap_file, *mailmap_blob; static const char * const check_mailmap_usage[] = { N_("git check-mailmap [] ..."), NULL }; static const struct option check_mailmap_options[] = { OPT_BOOL(0, "stdin", &use_stdin, N_("also read contacts from stdin")), OPT_FILENAME(0, "mailmap-file", &mailmap_file, N_("read additional mailmap entries from file")), OPT_STRING(0, "mailmap-blob", &mailmap_blob, N_("blob"), N_("read additional mailmap entries from blob")), OPT_END() }; static void check_mailmap(struct string_list *mailmap, const char *contact) { const char *name, *mail; size_t namelen, maillen; struct ident_split ident; if (!split_ident_line(&ident, contact, strlen(contact))) { name = ident.name_begin; namelen = ident.name_end - ident.name_begin; mail = ident.mail_begin; maillen = ident.mail_end - ident.mail_begin; } else { name = NULL; namelen = 0; mail = contact; maillen = strlen(contact); } map_user(mailmap, &mail, &maillen, &name, &namelen); if (namelen) printf("%.*s ", (int)namelen, name); printf("<%.*s>\n", (int)maillen, mail); } int cmd_check_mailmap(int argc, const char **argv, const char *prefix, struct repository 
*repo UNUSED) { int i; struct string_list mailmap = STRING_LIST_INIT_NODUP; git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, check_mailmap_options, check_mailmap_usage, 0); if (argc == 0 && !use_stdin) die(_("no contacts specified")); read_mailmap(&mailmap); if (mailmap_blob) read_mailmap_blob(&mailmap, mailmap_blob); if (mailmap_file) read_mailmap_file(&mailmap, mailmap_file, 0); for (i = 0; i < argc; ++i) check_mailmap(&mailmap, argv[i]); maybe_flush_or_die(stdout, "stdout"); if (use_stdin) { struct strbuf buf = STRBUF_INIT; while (strbuf_getline_lf(&buf, stdin) != EOF) { check_mailmap(&mailmap, buf.buf); maybe_flush_or_die(stdout, "stdout"); } strbuf_release(&buf); } clear_mailmap(&mailmap); return 0; } git-cinnabar-0.7.0/git-core/builtin/check-ref-format.c000064400000000000000000000045121046102023000206300ustar 00000000000000/* * GIT - The information manager from hell */ #include "builtin.h" #include "refs.h" #include "setup.h" #include "strbuf.h" static const char builtin_check_ref_format_usage[] = "git check-ref-format [--normalize] [] \n" " or: git check-ref-format --branch "; /* * Return a copy of refname but with leading slashes removed and runs * of adjacent slashes replaced with single slashes. * * This function is similar to normalize_path_copy(), but stripped down * to meet check_ref_format's simpler needs. 
*/ static char *collapse_slashes(const char *refname) { char *ret = xmallocz(strlen(refname)); char ch; char prev = '/'; char *cp = ret; while ((ch = *refname++) != '\0') { if (prev == '/' && ch == prev) continue; *cp++ = ch; prev = ch; } *cp = '\0'; return ret; } static int check_ref_format_branch(const char *arg) { struct strbuf sb = STRBUF_INIT; const char *name; int nongit; setup_git_directory_gently(&nongit); if (check_branch_ref(&sb, arg) || !skip_prefix(sb.buf, "refs/heads/", &name)) die("'%s' is not a valid branch name", arg); printf("%s\n", name); strbuf_release(&sb); return 0; } int cmd_check_ref_format(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { int i; int normalize = 0; int flags = 0; const char *refname; char *to_free = NULL; int ret = 1; BUG_ON_NON_EMPTY_PREFIX(prefix); if (argc == 2 && !strcmp(argv[1], "-h")) usage(builtin_check_ref_format_usage); if (argc == 3 && !strcmp(argv[1], "--branch")) return check_ref_format_branch(argv[2]); for (i = 1; i < argc && argv[i][0] == '-'; i++) { if (!strcmp(argv[i], "--normalize") || !strcmp(argv[i], "--print")) normalize = 1; else if (!strcmp(argv[i], "--allow-onelevel")) flags |= REFNAME_ALLOW_ONELEVEL; else if (!strcmp(argv[i], "--no-allow-onelevel")) flags &= ~REFNAME_ALLOW_ONELEVEL; else if (!strcmp(argv[i], "--refspec-pattern")) flags |= REFNAME_REFSPEC_PATTERN; else usage(builtin_check_ref_format_usage); } if (! 
(i == argc - 1)) usage(builtin_check_ref_format_usage); refname = argv[i]; if (normalize) refname = to_free = collapse_slashes(refname); if (check_refname_format(refname, flags)) goto cleanup; if (normalize) printf("%s\n", refname); ret = 0; cleanup: free(to_free); return ret; } git-cinnabar-0.7.0/git-core/builtin/checkout--worker.c000064400000000000000000000102071046102023000207020ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "builtin.h" #include "config.h" #include "entry.h" #include "gettext.h" #include "parallel-checkout.h" #include "parse-options.h" #include "pkt-line.h" #include "read-cache-ll.h" static void packet_to_pc_item(const char *buffer, int len, struct parallel_checkout_item *pc_item) { const struct pc_item_fixed_portion *fixed_portion; const char *variant; char *encoding; if (len < sizeof(struct pc_item_fixed_portion)) BUG("checkout worker received too short item (got %dB, exp %dB)", len, (int)sizeof(struct pc_item_fixed_portion)); fixed_portion = (struct pc_item_fixed_portion *)buffer; if (len - sizeof(struct pc_item_fixed_portion) != fixed_portion->name_len + fixed_portion->working_tree_encoding_len) BUG("checkout worker received corrupted item"); variant = buffer + sizeof(struct pc_item_fixed_portion); /* * Note: the main process uses zero length to communicate that the * encoding is NULL. There is no use case that requires sending an * actual empty string, since convert_attrs() never sets * ca.working_tree_enconding to "". 
*/ if (fixed_portion->working_tree_encoding_len) { encoding = xmemdupz(variant, fixed_portion->working_tree_encoding_len); variant += fixed_portion->working_tree_encoding_len; } else { encoding = NULL; } memset(pc_item, 0, sizeof(*pc_item)); pc_item->ce = make_empty_transient_cache_entry(fixed_portion->name_len, NULL); pc_item->ce->ce_namelen = fixed_portion->name_len; pc_item->ce->ce_mode = fixed_portion->ce_mode; memcpy(pc_item->ce->name, variant, pc_item->ce->ce_namelen); oidcpy(&pc_item->ce->oid, &fixed_portion->oid); pc_item->id = fixed_portion->id; pc_item->ca.crlf_action = fixed_portion->crlf_action; pc_item->ca.ident = fixed_portion->ident; pc_item->ca.working_tree_encoding = encoding; } static void report_result(struct parallel_checkout_item *pc_item) { struct pc_item_result res = { 0 }; size_t size; res.id = pc_item->id; res.status = pc_item->status; if (pc_item->status == PC_ITEM_WRITTEN) { res.st = pc_item->st; size = sizeof(res); } else { size = PC_ITEM_RESULT_BASE_SIZE; } packet_write(1, (const char *)&res, size); } /* Free the worker-side malloced data, but not pc_item itself. 
*/ static void release_pc_item_data(struct parallel_checkout_item *pc_item) { free((char *)pc_item->ca.working_tree_encoding); discard_cache_entry(pc_item->ce); } static void worker_loop(struct checkout *state) { struct parallel_checkout_item *items = NULL; size_t i, nr = 0, alloc = 0; while (1) { int len = packet_read(0, packet_buffer, sizeof(packet_buffer), 0); if (len < 0) BUG("packet_read() returned negative value"); else if (!len) break; ALLOC_GROW(items, nr + 1, alloc); packet_to_pc_item(packet_buffer, len, &items[nr++]); } for (i = 0; i < nr; i++) { struct parallel_checkout_item *pc_item = &items[i]; write_pc_item(pc_item, state); report_result(pc_item); release_pc_item_data(pc_item); } packet_flush(1); free(items); } static const char * const checkout_worker_usage[] = { N_("git checkout--worker []"), NULL }; int cmd_checkout__worker(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { struct checkout state = CHECKOUT_INIT; struct option checkout_worker_options[] = { OPT_STRING(0, "prefix", &state.base_dir, N_("string"), N_("when creating files, prepend ")), OPT_END() }; if (argc == 2 && !strcmp(argv[1], "-h")) usage_with_options(checkout_worker_usage, checkout_worker_options); git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, checkout_worker_options, checkout_worker_usage, 0); if (argc > 0) usage_with_options(checkout_worker_usage, checkout_worker_options); if (state.base_dir) state.base_dir_len = strlen(state.base_dir); /* * Setting this on a worker won't actually update the index. We just * need to tell the checkout machinery to lstat() the written entries, * so that we can send this data back to the main process. 
*/ state.refresh_cache = 1; worker_loop(&state); return 0; } git-cinnabar-0.7.0/git-core/builtin/checkout-index.c000064400000000000000000000222631046102023000204300ustar 00000000000000/* * Check-out files from the "current cache directory" * * Copyright (C) 2005 Linus Torvalds * */ #define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "builtin.h" #include "config.h" #include "gettext.h" #include "lockfile.h" #include "quote.h" #include "cache-tree.h" #include "parse-options.h" #include "entry.h" #include "parallel-checkout.h" #include "read-cache-ll.h" #include "setup.h" #include "sparse-index.h" #define CHECKOUT_ALL 4 static int nul_term_line; static int checkout_stage; /* default to checkout stage0 */ static int ignore_skip_worktree; /* default to 0 */ static int to_tempfile = -1; static char topath[4][TEMPORARY_FILENAME_LENGTH + 1]; static struct checkout state = CHECKOUT_INIT; static void write_tempfile_record(const char *name, const char *prefix) { int i; int have_tempname = 0; if (CHECKOUT_ALL == checkout_stage) { for (i = 1; i < 4; i++) if (topath[i][0]) { have_tempname = 1; break; } if (have_tempname) { for (i = 1; i < 4; i++) { if (i > 1) putchar(' '); if (topath[i][0]) fputs(topath[i], stdout); else putchar('.'); } } } else if (topath[checkout_stage][0]) { have_tempname = 1; fputs(topath[checkout_stage], stdout); } if (have_tempname) { putchar('\t'); write_name_quoted_relative(name, prefix, stdout, nul_term_line ? 
'\0' : '\n'); } for (i = 0; i < 4; i++) { topath[i][0] = 0; } } static int checkout_file(const char *name, const char *prefix) { int namelen = strlen(name); int pos = index_name_pos(the_repository->index, name, namelen); int has_same_name = 0; int is_file = 0; int is_skipped = 1; int did_checkout = 0; int errs = 0; if (pos < 0) pos = -pos - 1; while (pos index->cache_nr) { struct cache_entry *ce =the_repository->index->cache[pos]; if (ce_namelen(ce) != namelen || memcmp(ce->name, name, namelen)) break; has_same_name = 1; pos++; if (S_ISSPARSEDIR(ce->ce_mode)) break; is_file = 1; if (!ignore_skip_worktree && ce_skip_worktree(ce)) break; is_skipped = 0; if (ce_stage(ce) != checkout_stage && (CHECKOUT_ALL != checkout_stage || !ce_stage(ce))) continue; did_checkout = 1; if (checkout_entry(ce, &state, to_tempfile ? topath[ce_stage(ce)] : NULL, NULL) < 0) errs++; } if (did_checkout) { if (to_tempfile) write_tempfile_record(name, prefix); return errs > 0 ? -1 : 0; } /* * At this point we know we didn't try to check anything out. If it was * because we did find an entry but it was stage 0, that's not an * error. 
*/ if (has_same_name && checkout_stage == CHECKOUT_ALL) return 0; if (!state.quiet) { fprintf(stderr, "git checkout-index: %s ", name); if (!has_same_name) fprintf(stderr, "is not in the cache"); else if (!is_file) fprintf(stderr, "is a sparse directory"); else if (is_skipped) fprintf(stderr, "has skip-worktree enabled; " "use '--ignore-skip-worktree-bits' to checkout"); else if (checkout_stage) fprintf(stderr, "does not exist at stage %d", checkout_stage); else fprintf(stderr, "is unmerged"); fputc('\n', stderr); } return -1; } static int checkout_all(const char *prefix, int prefix_length) { int i, errs = 0; struct cache_entry *last_ce = NULL; for (i = 0; i < the_repository->index->cache_nr ; i++) { struct cache_entry *ce = the_repository->index->cache[i]; if (S_ISSPARSEDIR(ce->ce_mode)) { if (!ce_skip_worktree(ce)) BUG("sparse directory '%s' does not have skip-worktree set", ce->name); /* * If the current entry is a sparse directory and skip-worktree * entries are being checked out, expand the index and continue * the loop on the current index position (now pointing to the * first entry inside the expanded sparse directory). */ if (ignore_skip_worktree) { ensure_full_index(the_repository->index); ce = the_repository->index->cache[i]; } } if (!ignore_skip_worktree && ce_skip_worktree(ce)) continue; if (ce_stage(ce) != checkout_stage && (CHECKOUT_ALL != checkout_stage || !ce_stage(ce))) continue; if (prefix && *prefix && (ce_namelen(ce) <= prefix_length || memcmp(prefix, ce->name, prefix_length))) continue; if (last_ce && to_tempfile) { if (ce_namelen(last_ce) != ce_namelen(ce) || memcmp(last_ce->name, ce->name, ce_namelen(ce))) write_tempfile_record(last_ce->name, prefix); } if (checkout_entry(ce, &state, to_tempfile ? 
topath[ce_stage(ce)] : NULL, NULL) < 0) errs++; last_ce = ce; } if (last_ce && to_tempfile) write_tempfile_record(last_ce->name, prefix); return !!errs; } static const char * const builtin_checkout_index_usage[] = { N_("git checkout-index [] [--] [...]"), NULL }; static int option_parse_stage(const struct option *opt, const char *arg, int unset) { int *stage = opt->value; BUG_ON_OPT_NEG(unset); if (!strcmp(arg, "all")) { *stage = CHECKOUT_ALL; } else { int ch = arg[0]; if ('1' <= ch && ch <= '3') *stage = arg[0] - '0'; else die(_("stage should be between 1 and 3 or all")); } return 0; } int cmd_checkout_index(int argc, const char **argv, const char *prefix, struct repository *repo UNUSED) { int i; struct lock_file lock_file = LOCK_INIT; int all = 0; int read_from_stdin = 0; int prefix_length; int force = 0, quiet = 0, not_new = 0; int index_opt = 0; int err = 0; int pc_workers, pc_threshold; struct option builtin_checkout_index_options[] = { OPT_BOOL('a', "all", &all, N_("check out all files in the index")), OPT_BOOL(0, "ignore-skip-worktree-bits", &ignore_skip_worktree, N_("do not skip files with skip-worktree set")), OPT__FORCE(&force, N_("force overwrite of existing files"), 0), OPT__QUIET(&quiet, N_("no warning for existing files and files not in index")), OPT_BOOL('n', "no-create", ¬_new, N_("don't checkout new files")), OPT_BOOL('u', "index", &index_opt, N_("update stat information in the index file")), OPT_BOOL('z', NULL, &nul_term_line, N_("paths are separated with NUL character")), OPT_BOOL(0, "stdin", &read_from_stdin, N_("read list of paths from the standard input")), OPT_BOOL(0, "temp", &to_tempfile, N_("write the content to temporary files")), OPT_STRING(0, "prefix", &state.base_dir, N_("string"), N_("when creating files, prepend ")), OPT_CALLBACK_F(0, "stage", &checkout_stage, "(1|2|3|all)", N_("copy out the files from named stage"), PARSE_OPT_NONEG, option_parse_stage), OPT_END() }; if (argc == 2 && !strcmp(argv[1], "-h")) 
usage_with_options(builtin_checkout_index_usage, builtin_checkout_index_options); git_config(git_default_config, NULL); prefix_length = prefix ? strlen(prefix) : 0; prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; if (repo_read_index(the_repository) < 0) { die("invalid cache"); } argc = parse_options(argc, argv, prefix, builtin_checkout_index_options, builtin_checkout_index_usage, 0); state.istate = the_repository->index; state.force = force; state.quiet = quiet; state.not_new = not_new; if (!state.base_dir) state.base_dir = ""; state.base_dir_len = strlen(state.base_dir); if (to_tempfile < 0) to_tempfile = (checkout_stage == CHECKOUT_ALL); if (!to_tempfile && checkout_stage == CHECKOUT_ALL) die(_("options '%s' and '%s' cannot be used together"), "--stage=all", "--no-temp"); /* * when --prefix is specified we do not want to update cache. */ if (index_opt && !state.base_dir_len && !to_tempfile) { state.refresh_cache = 1; state.istate = the_repository->index; repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR); } get_parallel_checkout_configs(&pc_workers, &pc_threshold); if (pc_workers > 1) init_parallel_checkout(); /* Check out named files first */ for (i = 0; i < argc; i++) { const char *arg = argv[i]; char *p; if (all) die("git checkout-index: don't mix '--all' and explicit filenames"); if (read_from_stdin) die("git checkout-index: don't mix '--stdin' and explicit filenames"); p = prefix_path(prefix, prefix_length, arg); err |= checkout_file(p, prefix); free(p); } if (read_from_stdin) { struct strbuf buf = STRBUF_INIT; struct strbuf unquoted = STRBUF_INIT; strbuf_getline_fn getline_fn; if (all) die("git checkout-index: don't mix '--all' and '--stdin'"); getline_fn = nul_term_line ? 
strbuf_getline_nul : strbuf_getline_lf; while (getline_fn(&buf, stdin) != EOF) { char *p; if (!nul_term_line && buf.buf[0] == '"') { strbuf_reset(&unquoted); if (unquote_c_style(&unquoted, buf.buf, NULL)) die("line is badly quoted"); strbuf_swap(&buf, &unquoted); } p = prefix_path(prefix, prefix_length, buf.buf); err |= checkout_file(p, prefix); free(p); } strbuf_release(&unquoted); strbuf_release(&buf); } if (all) err |= checkout_all(prefix, prefix_length); if (pc_workers > 1) err |= run_parallel_checkout(&state, pc_workers, pc_threshold, NULL, NULL); if (err) return 1; if (is_lock_file_locked(&lock_file) && write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK)) die("Unable to write new index file"); return 0; } git-cinnabar-0.7.0/git-core/builtin/checkout.c000064400000000000000000001763061046102023000173330ustar 00000000000000#define USE_THE_REPOSITORY_VARIABLE #define DISABLE_SIGN_COMPARE_WARNINGS #include "builtin.h" #include "advice.h" #include "branch.h" #include "cache-tree.h" #include "checkout.h" #include "commit.h" #include "config.h" #include "diff.h" #include "dir.h" #include "environment.h" #include "gettext.h" #include "hex.h" #include "hook.h" #include "merge-ll.h" #include "lockfile.h" #include "mem-pool.h" #include "merge-recursive.h" #include "object-name.h" #include "object-store-ll.h" #include "parse-options.h" #include "path.h" #include "preload-index.h" #include "read-cache.h" #include "refs.h" #include "remote.h" #include "repo-settings.h" #include "resolve-undo.h" #include "revision.h" #include "setup.h" #include "submodule.h" #include "symlinks.h" #include "trace2.h" #include "tree.h" #include "tree-walk.h" #include "unpack-trees.h" #include "wt-status.h" #include "xdiff-interface.h" #include "entry.h" #include "parallel-checkout.h" #include "add-interactive.h" static const char * const checkout_usage[] = { N_("git checkout [] "), N_("git checkout [] [] -- ..."), NULL, }; static const char * const switch_branch_usage[] = { 
N_("git switch [] []"), NULL, }; static const char * const restore_usage[] = { N_("git restore [] [--source=] ..."), NULL, }; struct checkout_opts { int patch_mode; int quiet; int merge; int force; int force_detach; int implicit_detach; int writeout_stage; int overwrite_ignore; int ignore_skipworktree; int ignore_other_worktrees; int show_progress; int count_checkout_paths; int overlay_mode; int dwim_new_local_branch; int discard_changes; int accept_ref; int accept_pathspec; int switch_branch_doing_nothing_is_ok; int only_merge_on_switching_branches; int can_switch_when_in_progress; int orphan_from_empty_tree; int empty_pathspec_ok; int checkout_index; int checkout_worktree; const char *ignore_unmerged_opt; int ignore_unmerged; int pathspec_file_nul; char *pathspec_from_file; const char *new_branch; const char *new_branch_force; const char *new_orphan_branch; int new_branch_log; enum branch_track track; struct diff_options diff_options; int conflict_style; int branch_exists; const char *prefix; struct pathspec pathspec; const char *from_treeish; struct tree *source_tree; }; #define CHECKOUT_OPTS_INIT { .conflict_style = -1, .merge = -1 } struct branch_info { char *name; /* The short name used */ char *path; /* The full name of a real branch */ struct commit *commit; /* The named commit */ char *refname; /* The full name of the ref being checked out. */ struct object_id oid; /* The object ID of the commit being checked out. */ /* * if not null the branch is detached because it's already * checked out in this checkout */ char *checkout; }; static void branch_info_release(struct branch_info *info) { free(info->name); free(info->path); free(info->refname); free(info->checkout); } static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit, int changed) { return run_hooks_l(the_repository, "post-checkout", oid_to_hex(old_commit ? &old_commit->object.oid : null_oid()), oid_to_hex(new_commit ? &new_commit->object.oid : null_oid()), changed ? 
"1" : "0", NULL); /* "new_commit" can be NULL when checking out from the index before a commit exists. */ } static int update_some(const struct object_id *oid, struct strbuf *base, const char *pathname, unsigned mode, void *context UNUSED) { int len; struct cache_entry *ce; int pos; if (S_ISDIR(mode)) return READ_TREE_RECURSIVE; len = base->len + strlen(pathname); ce = make_empty_cache_entry(the_repository->index, len); oidcpy(&ce->oid, oid); memcpy(ce->name, base->buf, base->len); memcpy(ce->name + base->len, pathname, len - base->len); ce->ce_flags = create_ce_flags(0) | CE_UPDATE; ce->ce_namelen = len; ce->ce_mode = create_ce_mode(mode); /* * If the entry is the same as the current index, we can leave the old * entry in place. Whether it is UPTODATE or not, checkout_entry will * do the right thing. */ pos = index_name_pos(the_repository->index, ce->name, ce->ce_namelen); if (pos >= 0) { struct cache_entry *old = the_repository->index->cache[pos]; if (ce->ce_mode == old->ce_mode && !ce_intent_to_add(old) && oideq(&ce->oid, &old->oid)) { old->ce_flags |= CE_UPDATE; discard_cache_entry(ce); return 0; } } add_index_entry(the_repository->index, ce, ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); return 0; } static int read_tree_some(struct tree *tree, const struct pathspec *pathspec) { read_tree(the_repository, tree, pathspec, update_some, NULL); /* update the index with the given tree's info * for all args, expanding wildcards, and exit * with any non-zero return code. 
*/ return 0; } static int skip_same_name(const struct cache_entry *ce, int pos) { while (++pos < the_repository->index->cache_nr && !strcmp(the_repository->index->cache[pos]->name, ce->name)) ; /* skip */ return pos; } static int check_stage(int stage, const struct cache_entry *ce, int pos, int overlay_mode) { while (pos < the_repository->index->cache_nr && !strcmp(the_repository->index->cache[pos]->name, ce->name)) { if (ce_stage(the_repository->index->cache[pos]) == stage) return 0; pos++; } if (!overlay_mode) return 0; if (stage == 2) return error(_("path '%s' does not have our version"), ce->name); else return error(_("path '%s' does not have their version"), ce->name); } static int check_stages(unsigned stages, const struct cache_entry *ce, int pos) { unsigned seen = 0; const char *name = ce->name; while (pos < the_repository->index->cache_nr) { ce = the_repository->index->cache[pos]; if (strcmp(name, ce->name)) break; seen |= (1 << ce_stage(ce)); pos++; } if ((stages & seen) != stages) return error(_("path '%s' does not have all necessary versions"), name); return 0; } static int checkout_stage(int stage, const struct cache_entry *ce, int pos, const struct checkout *state, int *nr_checkouts, int overlay_mode) { while (pos < the_repository->index->cache_nr && !strcmp(the_repository->index->cache[pos]->name, ce->name)) { if (ce_stage(the_repository->index->cache[pos]) == stage) return checkout_entry(the_repository->index->cache[pos], state, NULL, nr_checkouts); pos++; } if (!overlay_mode) { unlink_entry(ce, NULL); return 0; } if (stage == 2) return error(_("path '%s' does not have our version"), ce->name); else return error(_("path '%s' does not have their version"), ce->name); } static int checkout_merged(int pos, const struct checkout *state, int *nr_checkouts, struct mem_pool *ce_mem_pool, int conflict_style) { struct cache_entry *ce = the_repository->index->cache[pos]; const char *path = ce->name; mmfile_t ancestor, ours, theirs; enum ll_merge_result 
merge_status; int status; struct object_id oid; mmbuffer_t result_buf; struct object_id threeway[3]; unsigned mode = 0; struct ll_merge_options ll_opts = LL_MERGE_OPTIONS_INIT; int renormalize = 0; memset(threeway, 0, sizeof(threeway)); while (pos < the_repository->index->cache_nr) { int stage; stage = ce_stage(ce); if (!stage || strcmp(path, ce->name)) break; oidcpy(&threeway[stage - 1], &ce->oid); if (stage == 2) mode = create_ce_mode(ce->ce_mode); pos++; ce = the_repository->index->cache[pos]; } if (is_null_oid(&threeway[1]) || is_null_oid(&threeway[2])) return error(_("path '%s' does not have necessary versions"), path); read_mmblob(&ancestor, &threeway[0]); read_mmblob(&ours, &threeway[1]); read_mmblob(&theirs, &threeway[2]); git_config_get_bool("merge.renormalize", &renormalize); ll_opts.renormalize = renormalize; ll_opts.conflict_style = conflict_style; merge_status = ll_merge(&result_buf, path, &ancestor, "base", &ours, "ours", &theirs, "theirs", state->istate, &ll_opts); free(ancestor.ptr); free(ours.ptr); free(theirs.ptr); if (merge_status == LL_MERGE_BINARY_CONFLICT) warning("Cannot merge binary files: %s (%s vs. %s)", path, "ours", "theirs"); if (merge_status < 0 || !result_buf.ptr) { free(result_buf.ptr); return error(_("path '%s': cannot merge"), path); } /* * NEEDSWORK: * There is absolutely no reason to write this as a blob object * and create a phony cache entry. This hack is primarily to get * to the write_entry() machinery that massages the contents to * work-tree format and writes out which only allows it for a * cache entry. The code in write_entry() needs to be refactored * to allow us to feed a instead of a cache * entry. Such a refactoring would help merge_recursive as well * (it also writes the merge result to the object database even * when it may contain conflicts). 
*/ if (write_object_file(result_buf.ptr, result_buf.size, OBJ_BLOB, &oid)) die(_("Unable to add merge result for '%s'"), path); free(result_buf.ptr); ce = make_transient_cache_entry(mode, &oid, path, 2, ce_mem_pool); if (!ce) die(_("make_cache_entry failed for path '%s'"), path); status = checkout_entry(ce, state, NULL, nr_checkouts); return status; } static void mark_ce_for_checkout_overlay(struct cache_entry *ce, char *ps_matched, const struct checkout_opts *opts) { ce->ce_flags &= ~CE_MATCHED; if (!opts->ignore_skipworktree && ce_skip_worktree(ce)) return; if (opts->source_tree && !(ce->ce_flags & CE_UPDATE)) /* * "git checkout tree-ish -- path", but this entry * is in the original index but is not in tree-ish * or does not match the pathspec; it will not be * checked out to the working tree. We will not do * anything to this entry at all. */ return; /* * Either this entry came from the tree-ish we are * checking the paths out of, or we are checking out * of the index. * * If it comes from the tree-ish, we already know it * matches the pathspec and could just stamp * CE_MATCHED to it from update_some(). But we still * need ps_matched and read_tree (and * eventually tree_entry_interesting) cannot fill * ps_matched yet. Once it can, we can avoid calling * match_pathspec() for _all_ entries when * opts->source_tree != NULL. */ if (ce_path_match(the_repository->index, ce, &opts->pathspec, ps_matched)) ce->ce_flags |= CE_MATCHED; } static void mark_ce_for_checkout_no_overlay(struct cache_entry *ce, char *ps_matched, const struct checkout_opts *opts) { ce->ce_flags &= ~CE_MATCHED; if (!opts->ignore_skipworktree && ce_skip_worktree(ce)) return; if (ce_path_match(the_repository->index, ce, &opts->pathspec, ps_matched)) { ce->ce_flags |= CE_MATCHED; if (opts->source_tree && !(ce->ce_flags & CE_UPDATE)) /* * In overlay mode, but the path is not in * tree-ish, which means we should remove it * from the index and the working tree. 
*/ ce->ce_flags |= CE_REMOVE | CE_WT_REMOVE; } } static int checkout_worktree(const struct checkout_opts *opts, const struct branch_info *info) { struct checkout state = CHECKOUT_INIT; int nr_checkouts = 0, nr_unmerged = 0; int errs = 0; int pos; int pc_workers, pc_threshold; struct mem_pool ce_mem_pool; state.force = 1; state.refresh_cache = 1; state.istate = the_repository->index; mem_pool_init(&ce_mem_pool, 0); get_parallel_checkout_configs(&pc_workers, &pc_threshold); init_checkout_metadata(&state.meta, info->refname, info->commit ? &info->commit->object.oid : &info->oid, NULL); enable_delayed_checkout(&state); if (pc_workers > 1) init_parallel_checkout(); enable_fscache(the_repository->index->cache_nr); for (pos = 0; pos < the_repository->index->cache_nr; pos++) { struct cache_entry *ce = the_repository->index->cache[pos]; if (ce->ce_flags & CE_MATCHED) { if (!ce_stage(ce)) { errs |= checkout_entry(ce, &state, NULL, &nr_checkouts); continue; } if (opts->writeout_stage) errs |= checkout_stage(opts->writeout_stage, ce, pos, &state, &nr_checkouts, opts->overlay_mode); else if (opts->merge) errs |= checkout_merged(pos, &state, &nr_unmerged, &ce_mem_pool, opts->conflict_style); pos = skip_same_name(ce, pos) - 1; } } if (pc_workers > 1) errs |= run_parallel_checkout(&state, pc_workers, pc_threshold, NULL, NULL); mem_pool_discard(&ce_mem_pool, should_validate_cache_entries()); disable_fscache(); remove_marked_cache_entries(the_repository->index, 1); remove_scheduled_dirs(); errs |= finish_delayed_checkout(&state, opts->show_progress); if (opts->count_checkout_paths) { if (nr_unmerged) fprintf_ln(stderr, Q_("Recreated %d merge conflict", "Recreated %d merge conflicts", nr_unmerged), nr_unmerged); if (opts->source_tree) fprintf_ln(stderr, Q_("Updated %d path from %s", "Updated %d paths from %s", nr_checkouts), nr_checkouts, repo_find_unique_abbrev(the_repository, &opts->source_tree->object.oid, DEFAULT_ABBREV)); else if (!nr_unmerged || nr_checkouts) fprintf_ln(stderr, 
Q_("Updated %d path from the index", "Updated %d paths from the index", nr_checkouts), nr_checkouts); } return errs; } static int checkout_paths(const struct checkout_opts *opts, const struct branch_info *new_branch_info) { int pos; static char *ps_matched; struct object_id rev; struct commit *head; int errs = 0; struct lock_file lock_file = LOCK_INIT; int checkout_index; trace2_cmd_mode(opts->patch_mode ? "patch" : "path"); if (opts->track != BRANCH_TRACK_UNSPECIFIED) die(_("'%s' cannot be used with updating paths"), "--track"); if (opts->new_branch_log) die(_("'%s' cannot be used with updating paths"), "-l"); if (opts->ignore_unmerged && opts->patch_mode) die(_("'%s' cannot be used with updating paths"), opts->ignore_unmerged_opt); if (opts->force_detach) die(_("'%s' cannot be used with updating paths"), "--detach"); if (opts->merge && opts->patch_mode) die(_("options '%s' and '%s' cannot be used together"), "--merge", "--patch"); if (opts->ignore_unmerged && opts->merge) die(_("options '%s' and '%s' cannot be used together"), opts->ignore_unmerged_opt, "-m"); if (opts->new_branch) die(_("Cannot update paths and switch to branch '%s' at the same time."), opts->new_branch); if (!opts->checkout_worktree && !opts->checkout_index) die(_("neither '%s' or '%s' is specified"), "--staged", "--worktree"); if (!opts->checkout_worktree && !opts->from_treeish) die(_("'%s' must be used when '%s' is not specified"), "--worktree", "--source"); /* * Reject --staged option to the restore command when combined with * merge-related options. Use the accept_ref flag to distinguish it * from the checkout command, which does not accept --staged anyway. * * `restore --ours|--theirs --worktree --staged` could mean resolving * conflicted paths to one side in both the worktree and the index, * but does not currently. * * `restore --merge|--conflict=